Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- .gitattributes +2 -0
- deepseek/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so +3 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cublas.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cusolver.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cusparse.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/_core/__init__.py +79 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/_core/_codeblock.py +38 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/_core/syncdetect.py +68 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/_math/__pycache__/explog.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/_math/__pycache__/ufunc.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__init__.py +202 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/compiler.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/cudnn.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/cutensor.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/nccl.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/nvtx.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/profiler.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/runtime.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/compiler.py +991 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cudnn.py +17 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cufft.pxd +99 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cufft.pyx +1205 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cub.cu +1189 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufft.h +324 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufftXt.cu +68 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufftXt.h +10 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_thrust.cu +526 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cutensor.py +14 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__init__.py +6 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/debug_print.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/line_profile.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/debug_print.py +78 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/line_profile.py +171 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/nccl.py +17 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/nvtx.py +1 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/profiler.py +3 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/runtime.py +1 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__init__.py +19 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/_fft.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/config.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/fft/config.py +61 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__init__.py +1 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/_routines_poly.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/_shape_base.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/stride_tricks.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -1791,3 +1791,5 @@ infer_4_30_0/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-
 infer_4_30_0/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_tf437/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
+deepseek/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
+infer_4_30_0/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
deepseek/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02f46e8430411ff6e43340722e6c4957554f731833ebcdd00b59c0ea86c412fe
+size 96295585
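These three lines are a standard Git LFS pointer: the actual shared library is stored out of band and addressed by its SHA-256. A minimal sketch of verifying a fetched blob against this pointer (the local file path is hypothetical):

import hashlib

# Values copied from the LFS pointer above.
expected_oid = '02f46e8430411ff6e43340722e6c4957554f731833ebcdd00b59c0ea86c412fe'
expected_size = 96295585

# Hypothetical local copy of the fetched blob.
with open('libtorch_cuda_linalg.so', 'rb') as f:
    data = f.read()

# A blob matches its pointer iff both the size and the SHA-256 digest agree.
assert len(data) == expected_size
assert hashlib.sha256(data).hexdigest() == expected_oid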
infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (28.1 kB)

infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cublas.cpython-310.pyc
ADDED
Binary file (20.6 kB)

infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cusolver.cpython-310.pyc
ADDED
Binary file (316 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/__pycache__/cusparse.cpython-310.pyc
ADDED
Binary file (316 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/_core/__init__.py
ADDED
@@ -0,0 +1,79 @@
+# mypy: ignore-errors
+
+from cupy._core import core  # NOQA
+from cupy._core import fusion  # NOQA
+from cupy._core import internal  # NOQA
+
+
+# internal APIs for testing and development
+from cupy._core._accelerator import set_elementwise_accelerators  # NOQA
+from cupy._core._accelerator import set_reduction_accelerators  # NOQA
+from cupy._core._accelerator import set_routine_accelerators  # NOQA
+from cupy._core._accelerator import get_elementwise_accelerators  # NOQA
+from cupy._core._accelerator import get_reduction_accelerators  # NOQA
+from cupy._core._accelerator import get_routine_accelerators  # NOQA
+
+
+# import class and function
+from cupy._core._kernel import create_ufunc  # NOQA
+from cupy._core._kernel import ElementwiseKernel  # NOQA
+from cupy._core._kernel import ufunc  # NOQA
+from cupy._core._kernel import _get_warpsize  # NOQA
+from cupy._core._reduction import create_reduction_func  # NOQA
+from cupy._core._reduction import ReductionKernel  # NOQA
+from cupy._core._routines_binary import bitwise_and  # NOQA
+from cupy._core._routines_binary import bitwise_or  # NOQA
+from cupy._core._routines_binary import bitwise_xor  # NOQA
+from cupy._core._routines_binary import invert  # NOQA
+from cupy._core._routines_binary import left_shift  # NOQA
+from cupy._core._routines_binary import right_shift  # NOQA
+from cupy._core._routines_linalg import _mat_ptrs  # NOQA
+from cupy._core._routines_linalg import dot  # NOQA
+from cupy._core._routines_linalg import get_compute_type  # NOQA
+from cupy._core._routines_linalg import matmul  # NOQA
+from cupy._core._routines_linalg import set_compute_type  # NOQA
+from cupy._core._routines_linalg import tensordot_core  # NOQA
+from cupy._core._routines_logic import create_comparison  # NOQA
+from cupy._core._routines_logic import equal  # NOQA
+from cupy._core._routines_logic import greater  # NOQA
+from cupy._core._routines_logic import greater_equal  # NOQA
+from cupy._core._routines_logic import less  # NOQA
+from cupy._core._routines_logic import less_equal  # NOQA
+from cupy._core._routines_logic import not_equal  # NOQA
+from cupy._core._routines_manipulation import array_split  # NOQA
+from cupy._core._routines_manipulation import broadcast  # NOQA
+from cupy._core._routines_manipulation import broadcast_to  # NOQA
+from cupy._core._routines_manipulation import concatenate_method  # NOQA
+from cupy._core._routines_manipulation import moveaxis  # NOQA
+from cupy._core._routines_manipulation import rollaxis  # NOQA
+from cupy._core._routines_manipulation import size  # NOQA
+from cupy._core._routines_math import absolute  # NOQA
+from cupy._core._routines_math import add  # NOQA
+from cupy._core._routines_math import angle, angle_deg  # NOQA
+from cupy._core._routines_math import conjugate  # NOQA
+from cupy._core._routines_math import divide  # NOQA
+from cupy._core._routines_math import floor_divide  # NOQA
+from cupy._core._routines_math import multiply  # NOQA
+from cupy._core._routines_math import negative  # NOQA
+from cupy._core._routines_math import positive  # NOQA
+from cupy._core._routines_math import power  # NOQA
+from cupy._core._routines_math import remainder  # NOQA
+from cupy._core._routines_math import sqrt  # NOQA
+from cupy._core._routines_math import subtract  # NOQA
+from cupy._core._routines_math import true_divide  # NOQA
+from cupy._core._routines_statistics import nanmax  # NOQA
+from cupy._core._routines_statistics import nanmin  # NOQA
+from cupy._core.core import _internal_ascontiguousarray  # NOQA
+from cupy._core.core import _internal_asfortranarray  # NOQA
+from cupy._core.core import array  # NOQA
+from cupy._core.core import ascontiguousarray  # NOQA
+from cupy._core.core import asfortranarray  # NOQA
+from cupy._core.core import divmod  # NOQA
+from cupy._core.core import elementwise_copy  # NOQA
+from cupy._core.core import ndarray  # NOQA
+from cupy._core.dlpack import fromDlpack  # NOQA
+from cupy._core.dlpack import from_dlpack  # NOQA
+from cupy._core.internal import complete_slice  # NOQA
+from cupy._core.internal import get_size  # NOQA
+from cupy._core.raw import RawKernel  # NOQA
+from cupy._core.raw import RawModule  # NOQA
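Among the names re-exported here, ElementwiseKernel and RawKernel are the user-facing entry points for custom kernels. A short usage sketch of ElementwiseKernel (requires a CUDA device; the kernel itself is illustrative):

import cupy

# CuPy generates the element loop; only the per-element body is supplied.
squared_diff = cupy.ElementwiseKernel(
    'float32 x, float32 y',   # inputs
    'float32 z',              # output
    'z = (x - y) * (x - y)',  # per-element expression
    'squared_diff')           # kernel name

x = cupy.arange(5, dtype=cupy.float32)
y = cupy.ones(5, dtype=cupy.float32)
print(squared_diff(x, y))  # [1. 0. 1. 4. 9.]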
infer_4_30_0/lib/python3.10/site-packages/cupy/_core/_codeblock.py
ADDED
@@ -0,0 +1,38 @@
+from typing import Any, List
+
+_CodeType = Any  # TODO(asi1024): Correct type annotation
+
+
+class CodeBlock:
+    """Code fragment for the readable format.
+    """
+
+    def __init__(self, head: str, codes: _CodeType) -> None:
+        self._head = '' if head == '' else head + ' '
+        self._codes = codes
+
+    def _to_str_list(self, indent_width: int = 0) -> List[str]:
+        codes: List[str] = []
+        codes.append(' ' * indent_width + self._head + '{')
+        for code in self._codes:
+            next_indent_width = indent_width + 2
+            if isinstance(code, str):
+                codes.append(' ' * next_indent_width + code)
+            elif isinstance(code, CodeBlock):
+                codes += code._to_str_list(indent_width=next_indent_width)
+            else:
+                assert False
+        codes.append(' ' * indent_width + '}')
+        return codes
+
+    def __str__(self) -> str:
+        """Emit CUDA program like the following format.
+
+        <<head>> {
+          <<begin codes>>
+          ...;
+          <<end codes>>
+        }
+        """
+
+        return '\n'.join(self._to_str_list())
infer_4_30_0/lib/python3.10/site-packages/cupy/_core/syncdetect.py
ADDED
@@ -0,0 +1,68 @@
+import contextlib
+import threading
+import warnings
+
+
+_thread_local = threading.local()
+
+
+class DeviceSynchronized(RuntimeError):
+    """Raised when device synchronization is detected while disallowed.
+
+    .. warning::
+
+        This API has been deprecated in CuPy v10 and will be removed in future
+        releases.
+
+    .. seealso:: :func:`cupyx.allow_synchronize`
+
+    """
+
+    def __init__(self, message=None):
+        if message is None:
+            message = 'Device synchronization was detected while disallowed.'
+        super().__init__(message)
+
+
+def _is_allowed():
+    # Returns whether device synchronization is allowed in the current thread.
+    try:
+        return _thread_local.allowed
+    except AttributeError:
+        _thread_local.allowed = True
+        return True
+
+
+def _declare_synchronize():
+    # Raises DeviceSynchronized if device synchronization is disallowed in
+    # the current thread.
+    if not _is_allowed():
+        raise DeviceSynchronized()
+
+
+@contextlib.contextmanager
+def allow_synchronize(allow):
+    """Allows or disallows device synchronization temporarily in the current \
+thread.
+
+    .. warning::
+
+        This API has been deprecated in CuPy v10 and will be removed in future
+        releases.
+
+    If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
+    will be raised.
+
+    Note that there can be false negatives and positives.
+    Device synchronization outside CuPy will not be detected.
+    """
+    warnings.warn(
+        'cupyx.allow_synchronize will be removed in future releases as it '
+        'is not possible to reliably detect synchronizations.')
+
+    old = _is_allowed()
+    _thread_local.allowed = allow
+    try:
+        yield
+    finally:
+        _thread_local.allowed = old
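The flag lives in thread-local storage, so disallowing synchronization in one thread leaves other threads unaffected. A usage sketch of the (deprecated) public wrapper exposed as cupyx.allow_synchronize:

import cupyx

# Operations inside the block that CuPy detects as synchronizing the
# device raise DeviceSynchronized (detection is best-effort, as the
# docstring above notes).
with cupyx.allow_synchronize(False):
    ...  # launch work that must remain asynchronous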
infer_4_30_0/lib/python3.10/site-packages/cupy/_math/__pycache__/explog.cpython-310.pyc
ADDED
Binary file (1.97 kB)

infer_4_30_0/lib/python3.10/site-packages/cupy/_math/__pycache__/ufunc.cpython-310.pyc
ADDED
Binary file (644 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__init__.py
ADDED
@@ -0,0 +1,202 @@
+import contextlib
+import warnings
+
+import cupy as _cupy
+from cupy._environment import get_cuda_path  # NOQA
+from cupy._environment import get_nvcc_path  # NOQA
+from cupy._environment import get_rocm_path  # NOQA
+from cupy._environment import get_hipcc_path  # NOQA
+from cupy.cuda import compiler  # NOQA
+from cupy.cuda import device  # NOQA
+from cupy.cuda import function  # NOQA
+from cupy.cuda import memory  # NOQA
+from cupy.cuda import memory_hook  # NOQA
+from cupy.cuda import memory_hooks  # NOQA
+from cupy.cuda import pinned_memory  # NOQA
+from cupy.cuda import profiler  # NOQA
+from cupy.cuda import stream  # NOQA
+from cupy.cuda import texture  # NOQA
+from cupy_backends.cuda.api import driver  # NOQA
+from cupy_backends.cuda.api import runtime  # NOQA
+from cupy_backends.cuda.libs import nvrtc  # NOQA
+
+
+_available = None
+
+
+class _UnavailableModule():
+    available = False
+
+    def __init__(self, name):
+        self.__name__ = name
+
+
+from cupy.cuda import cub  # NOQA
+
+
+try:
+    from cupy_backends.cuda.libs import nvtx  # NOQA
+except ImportError:
+    nvtx = _UnavailableModule('cupy.cuda.nvtx')
+
+try:
+    from cupy.cuda import thrust  # NOQA
+except ImportError:
+    thrust = _UnavailableModule('cupy.cuda.thrust')
+
+
+def __getattr__(key):
+    if key == 'cusolver':
+        from cupy_backends.cuda.libs import cusolver
+        _cupy.cuda.cusolver = cusolver
+        return cusolver
+    elif key == 'cusparse':
+        from cupy_backends.cuda.libs import cusparse
+        _cupy.cuda.cusparse = cusparse
+        return cusparse
+    elif key == 'curand':
+        from cupy_backends.cuda.libs import curand
+        _cupy.cuda.curand = curand
+        return curand
+    elif key == 'cublas':
+        from cupy_backends.cuda.libs import cublas
+        _cupy.cuda.cublas = cublas
+        return cublas
+    elif key == 'jitify':
+        if not runtime.is_hip and driver.get_build_version() > 0:
+            import cupy.cuda.jitify as jitify
+        else:
+            jitify = _UnavailableModule('cupy.cuda.jitify')
+        _cupy.cuda.jitify = jitify
+        return jitify
+
+    # `nvtx_enabled` flags are kept for backward compatibility with Chainer.
+    # Note: module-level getattr only runs on Python 3.7+.
+    for mod in [nvtx]:
+        flag = '{}_enabled'.format(mod.__name__.split('.')[-1])
+        if key == flag:
+            warnings.warn('''
+cupy.cuda.{} has been deprecated in CuPy v8 and will be removed in the future release.
+Use {}.available instead.
+'''.format(flag, mod.__name__), DeprecationWarning)  # NOQA
+            return not isinstance(mod, _UnavailableModule)
+
+    raise AttributeError(
+        "module '{}' has no attribute '{}'".format(__name__, key))
+
+
+def is_available():
+    global _available
+    if _available is None:
+        _available = False
+        try:
+            _available = runtime.getDeviceCount() > 0
+        except Exception as e:
+            if (not runtime.is_hip and e.args[0] !=
+                    'cudaErrorNoDevice: no CUDA-capable device is detected'):
+                raise
+            elif runtime.is_hip and 'hipErrorNoDevice' not in e.args[0]:
+                raise
+    return _available
+
+
+def get_local_runtime_version() -> int:
+    """
+    Returns the version of the CUDA Runtime installed in the environment.
+
+    Unlike :func:`cupy.cuda.runtime.runtimeGetVersion`, which returns the
+    CUDA Runtime version statically linked to CuPy, this function returns the
+    version retrieved from the shared library installed on the host.
+    Use this method to probe the CUDA Runtime version installed in the
+    environment.
+    """
+    return runtime._getLocalRuntimeVersion()
+
+
+# import class and function
+from cupy.cuda.device import Device  # NOQA
+from cupy.cuda.device import get_cublas_handle  # NOQA
+from cupy.cuda.device import get_device_id  # NOQA
+from cupy.cuda.function import Function  # NOQA
+from cupy.cuda.function import Module  # NOQA
+from cupy.cuda.memory import alloc  # NOQA
+from cupy.cuda.memory import BaseMemory  # NOQA
+from cupy.cuda.memory import malloc_managed  # NOQA
+from cupy.cuda.memory import malloc_async  # NOQA
+from cupy.cuda.memory import ManagedMemory  # NOQA
+from cupy.cuda.memory import Memory  # NOQA
+from cupy.cuda.memory import MemoryAsync  # NOQA
+from cupy.cuda.memory import MemoryPointer  # NOQA
+from cupy.cuda.memory import MemoryPool  # NOQA
+from cupy.cuda.memory import MemoryAsyncPool  # NOQA
+from cupy.cuda.memory import PythonFunctionAllocator  # NOQA
+from cupy.cuda.memory import CFunctionAllocator  # NOQA
+from cupy.cuda.memory import set_allocator  # NOQA
+from cupy.cuda.memory import get_allocator  # NOQA
+from cupy.cuda.memory import UnownedMemory  # NOQA
+from cupy.cuda.memory_hook import MemoryHook  # NOQA
+from cupy.cuda.pinned_memory import alloc_pinned_memory  # NOQA
+from cupy.cuda.pinned_memory import PinnedMemory  # NOQA
+from cupy.cuda.pinned_memory import PinnedMemoryPointer  # NOQA
+from cupy.cuda.pinned_memory import PinnedMemoryPool  # NOQA
+from cupy.cuda.pinned_memory import set_pinned_memory_allocator  # NOQA
+from cupy.cuda.stream import Event  # NOQA
+from cupy.cuda.stream import get_current_stream  # NOQA
+from cupy.cuda.stream import get_elapsed_time  # NOQA
+from cupy.cuda.stream import Stream  # NOQA
+from cupy.cuda.stream import ExternalStream  # NOQA
+from cupy.cuda.graph import Graph  # NOQA
+
+
+@contextlib.contextmanager
+def using_allocator(allocator=None):
+    """Sets a thread-local allocator for GPU memory inside
+       context manager
+
+    Args:
+        allocator (function): CuPy memory allocator. It must have the same
+            interface as the :func:`cupy.cuda.alloc` function, which takes the
+            buffer size as an argument and returns the device buffer of that
+            size. When ``None`` is specified, raw memory allocator will be
+            used (i.e., memory pool is disabled).
+    """
+    # Note: cupy/memory.pyx would be the better place to implement this
+    # function but `contextmanager` decoration doesn't behave well in Cython.
+    if allocator is None:
+        allocator = memory._malloc
+    previous_allocator = memory._get_thread_local_allocator()
+    memory._set_thread_local_allocator(allocator)
+    try:
+        yield
+    finally:
+        memory._set_thread_local_allocator(previous_allocator)
+
+
+@contextlib.contextmanager
+def profile():
+    """Enable CUDA profiling during with statement.
+
+    This function enables profiling on entering a with statement, and disables
+    profiling on leaving the statement.
+
+    >>> with cupy.cuda.profile():
+    ...    # do something you want to measure
+    ...    pass
+
+    .. note::
+        When starting ``nvprof`` from the command line, manually setting
+        ``--profile-from-start off`` may be required for the desired behavior.
+
+    .. warning:: This context manager is deprecated. Please use
+        :class:`cupyx.profiler.profile` instead.
+    """
+    warnings.warn(
+        'cupy.cuda.profile has been deprecated since CuPy v10 '
+        'and will be removed in the future. Use cupyx.profiler.profile '
+        'instead.')
+
+    profiler.start()
+    try:
+        yield
+    finally:
+        profiler.stop()
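Of the helpers defined at the bottom of this module, using_allocator temporarily swaps the thread-local GPU allocator and restores the previous one even if the block raises. A brief sketch:

import cupy

pool = cupy.cuda.MemoryPool()

# Allocations inside the block are served by `pool`; the prior
# thread-local allocator is restored on exit.
with cupy.cuda.using_allocator(pool.malloc):
    a = cupy.zeros(1024, dtype=cupy.float32)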
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.25 kB)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/compiler.cpython-310.pyc
ADDED
Binary file (21.4 kB)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/cudnn.cpython-310.pyc
ADDED
Binary file (507 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/cutensor.cpython-310.pyc
ADDED
Binary file (489 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/nccl.cpython-310.pyc
ADDED
Binary file (502 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/nvtx.cpython-310.pyc
ADDED
Binary file (208 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/profiler.cpython-310.pyc
ADDED
Binary file (283 Bytes)

infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/__pycache__/runtime.cpython-310.pyc
ADDED
Binary file (213 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/compiler.py
ADDED
|
@@ -0,0 +1,991 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import hashlib
|
| 3 |
+
import math
|
| 4 |
+
import os
|
| 5 |
+
import platform
|
| 6 |
+
import re
|
| 7 |
+
import shutil
|
| 8 |
+
import subprocess
|
| 9 |
+
import sys
|
| 10 |
+
import tempfile
|
| 11 |
+
from typing import Optional
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
from cupy.cuda import device
|
| 15 |
+
from cupy.cuda import function
|
| 16 |
+
from cupy.cuda import get_rocm_path
|
| 17 |
+
from cupy_backends.cuda.api import driver
|
| 18 |
+
from cupy_backends.cuda.api import runtime
|
| 19 |
+
from cupy_backends.cuda.libs import nvrtc
|
| 20 |
+
from cupy import _environment
|
| 21 |
+
from cupy import _util
|
| 22 |
+
|
| 23 |
+
_cuda_hip_version = driver.get_build_version()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
_nvrtc_version = None
|
| 27 |
+
_win32 = sys.platform.startswith('win32')
|
| 28 |
+
_rdc_flags = ('--device-c', '-dc', '-rdc=true',
|
| 29 |
+
'--relocatable-device-code=true')
|
| 30 |
+
_cudadevrt = None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class NVCCException(Exception):
|
| 34 |
+
pass
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class HIPCCException(Exception):
|
| 38 |
+
pass
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class JitifyException(Exception):
|
| 42 |
+
pass
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _run_cc(cmd, cwd, backend, log_stream=None):
|
| 46 |
+
# backend in ('nvcc', 'hipcc')
|
| 47 |
+
try:
|
| 48 |
+
# Inherit the environment variable as NVCC refers to PATH, TMPDIR/TMP,
|
| 49 |
+
# NVCC_PREPEND_FLAGS, NVCC_APPEND_FLAGS.
|
| 50 |
+
env = os.environ
|
| 51 |
+
if _win32:
|
| 52 |
+
# Adds the extra PATH for NVCC invocation.
|
| 53 |
+
# When running NVCC, a host compiler must be available in PATH,
|
| 54 |
+
# but this is not true in general Windows environment unless
|
| 55 |
+
# running inside the SDK Tools command prompt.
|
| 56 |
+
# To mitigate the situation CuPy automatically adds a path to
|
| 57 |
+
# the VC++ compiler (cl.exe) found via setuptools, if it is not
|
| 58 |
+
# on the PATH.
|
| 59 |
+
extra_path = _get_extra_path_for_msvc()
|
| 60 |
+
if extra_path is not None:
|
| 61 |
+
path = extra_path + os.pathsep + os.environ.get('PATH', '')
|
| 62 |
+
env = copy.deepcopy(env)
|
| 63 |
+
env['PATH'] = path
|
| 64 |
+
log = subprocess.check_output(
|
| 65 |
+
cmd, cwd=cwd, env=env,
|
| 66 |
+
stderr=subprocess.STDOUT,
|
| 67 |
+
universal_newlines=True,
|
| 68 |
+
creationflags=(subprocess.CREATE_NO_WINDOW if _win32 else 0))
|
| 69 |
+
if log_stream is not None:
|
| 70 |
+
log_stream.write(log)
|
| 71 |
+
return log
|
| 72 |
+
except subprocess.CalledProcessError as e:
|
| 73 |
+
msg = ('`{0}` command returns non-zero exit status. \n'
|
| 74 |
+
'command: {1}\n'
|
| 75 |
+
'return-code: {2}\n'
|
| 76 |
+
'stdout/stderr: \n'
|
| 77 |
+
'{3}'.format(backend,
|
| 78 |
+
e.cmd,
|
| 79 |
+
e.returncode,
|
| 80 |
+
e.output))
|
| 81 |
+
if backend == 'nvcc':
|
| 82 |
+
raise NVCCException(msg)
|
| 83 |
+
elif backend == 'hipcc':
|
| 84 |
+
raise HIPCCException(msg)
|
| 85 |
+
else:
|
| 86 |
+
raise RuntimeError(msg)
|
| 87 |
+
except OSError as e:
|
| 88 |
+
msg = 'Failed to run `{0}` command. ' \
|
| 89 |
+
'Check PATH environment variable: ' \
|
| 90 |
+
+ str(e)
|
| 91 |
+
raise OSError(msg.format(backend))
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@_util.memoize()
|
| 95 |
+
def _get_extra_path_for_msvc():
|
| 96 |
+
cl_exe = shutil.which('cl.exe')
|
| 97 |
+
if cl_exe:
|
| 98 |
+
# The compiler is already on PATH, no extra path needed.
|
| 99 |
+
return None
|
| 100 |
+
|
| 101 |
+
cl_exe_dir = _get_cl_exe_dir()
|
| 102 |
+
if cl_exe_dir:
|
| 103 |
+
return cl_exe_dir
|
| 104 |
+
|
| 105 |
+
cl_exe_dir = _get_cl_exe_dir_fallback()
|
| 106 |
+
if cl_exe_dir:
|
| 107 |
+
return cl_exe_dir
|
| 108 |
+
|
| 109 |
+
return None
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _get_cl_exe_dir() -> Optional[str]:
|
| 113 |
+
try:
|
| 114 |
+
try:
|
| 115 |
+
# setuptools.msvc is missing in setuptools v74.0.0.
|
| 116 |
+
# setuptools.msvc requires explicit import in setuptools v74.1.0+.
|
| 117 |
+
import setuptools.msvc
|
| 118 |
+
except Exception:
|
| 119 |
+
return None
|
| 120 |
+
vctools = setuptools.msvc.EnvironmentInfo(platform.machine()).VCTools
|
| 121 |
+
for path in vctools:
|
| 122 |
+
cl_exe = os.path.join(path, 'cl.exe')
|
| 123 |
+
if os.path.exists(cl_exe):
|
| 124 |
+
return path
|
| 125 |
+
warnings.warn(f'cl.exe could not be found in {vctools}')
|
| 126 |
+
except Exception as e:
|
| 127 |
+
warnings.warn(
|
| 128 |
+
f'Failed to find cl.exe with setuptools.msvc: {type(e)}: {e}')
|
| 129 |
+
return None
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _get_cl_exe_dir_fallback() -> Optional[str]:
|
| 133 |
+
# Discover cl.exe without relying on undocumented setuptools.msvc API.
|
| 134 |
+
# As of now this code path exists only for setuptools 74.0.0 (see #8583).
|
| 135 |
+
# N.B. This takes few seconds as this incurs cmd.exe (vcvarsall.bat)
|
| 136 |
+
# invocation.
|
| 137 |
+
try:
|
| 138 |
+
from setuptools import Distribution
|
| 139 |
+
from setuptools.command.build_ext import build_ext
|
| 140 |
+
ext = build_ext(Distribution({'name': 'cupy_cl_exe_discover'}))
|
| 141 |
+
ext.setup_shlib_compiler()
|
| 142 |
+
ext.shlib_compiler.initialize() # MSVCCompiler only
|
| 143 |
+
return os.path.dirname(ext.shlib_compiler.cc)
|
| 144 |
+
except Exception as e:
|
| 145 |
+
warnings.warn(
|
| 146 |
+
f'Failed to find cl.exe with setuptools: {type(e)}: {e}')
|
| 147 |
+
return None
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _get_nvrtc_version():
|
| 151 |
+
global _nvrtc_version
|
| 152 |
+
if _nvrtc_version is None:
|
| 153 |
+
_nvrtc_version = nvrtc.getVersion()
|
| 154 |
+
|
| 155 |
+
return _nvrtc_version
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@_util.memoize()
|
| 159 |
+
def _get_cupy_cache_key():
|
| 160 |
+
from cupy._core import core
|
| 161 |
+
return core.CUPY_CACHE_KEY
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# Known archs for Tegra/Jetson/Xavier/etc
|
| 165 |
+
_tegra_archs = ('32', '53', '62', '72', '87')
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@_util.memoize()
|
| 169 |
+
def _get_max_compute_capability():
|
| 170 |
+
major, minor = _get_nvrtc_version()
|
| 171 |
+
if major < 11:
|
| 172 |
+
# CUDA 10.2
|
| 173 |
+
nvrtc_max_compute_capability = '75'
|
| 174 |
+
elif major == 11 and minor == 0:
|
| 175 |
+
# CUDA 11.0
|
| 176 |
+
nvrtc_max_compute_capability = '80'
|
| 177 |
+
elif major == 11 and minor < 8:
|
| 178 |
+
# CUDA 11.1 - 11.7
|
| 179 |
+
# Note: 87 is for Jetson Orin
|
| 180 |
+
nvrtc_max_compute_capability = '86'
|
| 181 |
+
elif (major == 11 and minor == 8) or (major == 12 and minor < 8):
|
| 182 |
+
# CUDA 11.8, 12.0 - 12.7
|
| 183 |
+
nvrtc_max_compute_capability = '90'
|
| 184 |
+
else:
|
| 185 |
+
# CUDA 12.8+
|
| 186 |
+
nvrtc_max_compute_capability = '120'
|
| 187 |
+
|
| 188 |
+
return nvrtc_max_compute_capability
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@_util.memoize()
|
| 192 |
+
def _get_extra_include_dir_opts():
|
| 193 |
+
major, minor = _get_nvrtc_version()
|
| 194 |
+
return tuple(
|
| 195 |
+
f'-I{d}'
|
| 196 |
+
for d in _environment._get_include_dir_from_conda_or_wheel(
|
| 197 |
+
major, minor
|
| 198 |
+
)
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
@_util.memoize(for_each_device=True)
|
| 203 |
+
def _get_arch():
|
| 204 |
+
# See Supported Compile Options section of NVRTC User Guide for
|
| 205 |
+
# the maximum value allowed for `--gpu-architecture`.
|
| 206 |
+
nvrtc_max_compute_capability = _get_max_compute_capability()
|
| 207 |
+
|
| 208 |
+
arch = device.Device().compute_capability
|
| 209 |
+
if arch in _tegra_archs:
|
| 210 |
+
return arch
|
| 211 |
+
else:
|
| 212 |
+
return min(arch, nvrtc_max_compute_capability, key=int)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
@_util.memoize(for_each_device=True)
|
| 216 |
+
def _get_arch_for_options_for_nvrtc(arch=None):
|
| 217 |
+
# NVRTC in CUDA 11.3+ generates PTX that cannot be run an earlier driver
|
| 218 |
+
# version than the one included in the used CUDA version, as
|
| 219 |
+
# documented in:
|
| 220 |
+
# https://docs.nvidia.com/cuda/archive/11.3.0/nvrtc/index.html#versioning
|
| 221 |
+
# Here we use `-arch=sm_*` instead of `-arch=compute_*` to directly
|
| 222 |
+
# generate cubin (SASS) instead of PTX. See #5097 for details.
|
| 223 |
+
if arch is None:
|
| 224 |
+
arch = _get_arch()
|
| 225 |
+
if (
|
| 226 |
+
not _use_ptx
|
| 227 |
+
and int(arch) <= int(_get_max_compute_capability())
|
| 228 |
+
):
|
| 229 |
+
return f'-arch=sm_{arch}', 'cubin'
|
| 230 |
+
return f'-arch=compute_{arch}', 'ptx'
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def _is_cudadevrt_needed(options):
|
| 234 |
+
return any(o for o in options if o in _rdc_flags)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def _get_cudadevrt_path():
|
| 238 |
+
global _cudadevrt
|
| 239 |
+
if _cudadevrt is not None:
|
| 240 |
+
return _cudadevrt
|
| 241 |
+
|
| 242 |
+
# defer import to here to avoid circular dependency
|
| 243 |
+
from cupy.cuda import get_cuda_path
|
| 244 |
+
global _win32
|
| 245 |
+
|
| 246 |
+
cudadevrt = get_cuda_path()
|
| 247 |
+
if cudadevrt is None:
|
| 248 |
+
raise RuntimeError('CUDA is not found.')
|
| 249 |
+
|
| 250 |
+
if _win32:
|
| 251 |
+
# rely on os.altsep
|
| 252 |
+
cudadevrt += '/lib/x64/cudadevrt.lib'
|
| 253 |
+
else: # linux & osx: search twice as in cupy/install/build.py
|
| 254 |
+
cudadevrt64 = cudadevrt + '/lib64/libcudadevrt.a'
|
| 255 |
+
if not os.path.isfile(cudadevrt64):
|
| 256 |
+
cudadevrt += '/lib/libcudadevrt.a'
|
| 257 |
+
else:
|
| 258 |
+
cudadevrt = cudadevrt64
|
| 259 |
+
if not os.path.isfile(cudadevrt):
|
| 260 |
+
raise RuntimeError(
|
| 261 |
+
'Relocatable PTX code is requested, but cudadevrt '
|
| 262 |
+
'is not found.')
|
| 263 |
+
return cudadevrt
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _remove_rdc_option(options):
|
| 267 |
+
return tuple(o for o in options if o not in _rdc_flags)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def _get_bool_env_variable(name, default):
|
| 271 |
+
val = os.environ.get(name)
|
| 272 |
+
if val is None or len(val) == 0:
|
| 273 |
+
return default
|
| 274 |
+
try:
|
| 275 |
+
return int(val) == 1
|
| 276 |
+
except ValueError:
|
| 277 |
+
return False
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
_use_ptx = _get_bool_env_variable('CUPY_COMPILE_WITH_PTX', False)
|
| 281 |
+
_jitify_header_source_map_populated = False
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _jitify_prep(source, options, cu_path):
|
| 285 |
+
from cupy.cuda import jitify
|
| 286 |
+
|
| 287 |
+
# TODO(leofang): refactor this?
|
| 288 |
+
global _jitify_header_source_map_populated
|
| 289 |
+
if not _jitify_header_source_map_populated:
|
| 290 |
+
from cupy._core import core
|
| 291 |
+
jitify._init_module()
|
| 292 |
+
jitify._add_sources(core._get_header_source_map())
|
| 293 |
+
_jitify_header_source_map_populated = True
|
| 294 |
+
|
| 295 |
+
# jitify requires the 1st line to be the program name
|
| 296 |
+
old_source = source
|
| 297 |
+
source = cu_path + '\n' + source
|
| 298 |
+
|
| 299 |
+
# Upon failure, in addition to throw an error Jitify also prints the log
|
| 300 |
+
# to stdout. In principle we could intercept that by hijacking stdout's
|
| 301 |
+
# file descriptor (tested locally), but the problem is pytest also does
|
| 302 |
+
# the same thing internally, causing strange errors when running the tests.
|
| 303 |
+
# As a result, we currently maintain Jitify's default behavior for easy
|
| 304 |
+
# debugging, and wait for the upstream to address this issue
|
| 305 |
+
# (NVIDIA/jitify#79).
|
| 306 |
+
|
| 307 |
+
try:
|
| 308 |
+
name, options, headers, include_names = jitify.jitify(source, options)
|
| 309 |
+
except Exception as e: # C++ could throw all kinds of errors
|
| 310 |
+
cex = CompileException(str(e), old_source, cu_path, options, 'jitify')
|
| 311 |
+
dump = _get_bool_env_variable(
|
| 312 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 313 |
+
if dump:
|
| 314 |
+
cex.dump(sys.stderr)
|
| 315 |
+
raise JitifyException(str(cex)) from e
|
| 316 |
+
assert name == cu_path
|
| 317 |
+
|
| 318 |
+
return options, headers, include_names
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
_has_usedforsecurity = (sys.version_info >= (3, 9))
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def _hash_hexdigest(value):
|
| 325 |
+
if _has_usedforsecurity:
|
| 326 |
+
hashobj = hashlib.sha1(value, usedforsecurity=False)
|
| 327 |
+
else:
|
| 328 |
+
hashobj = hashlib.sha1(value)
|
| 329 |
+
return hashobj.hexdigest()
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
_hash_length = len(_hash_hexdigest(b'')) # 40 for SHA1
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu',
|
| 336 |
+
name_expressions=None, log_stream=None,
|
| 337 |
+
cache_in_memory=False, jitify=False):
|
| 338 |
+
def _compile(
|
| 339 |
+
source, options, cu_path, name_expressions, log_stream, jitify):
|
| 340 |
+
|
| 341 |
+
if not runtime.is_hip:
|
| 342 |
+
arch_opt, method = _get_arch_for_options_for_nvrtc(arch)
|
| 343 |
+
options += (arch_opt,)
|
| 344 |
+
else:
|
| 345 |
+
method = 'ptx'
|
| 346 |
+
|
| 347 |
+
if jitify:
|
| 348 |
+
options, headers, include_names = _jitify_prep(
|
| 349 |
+
source, options, cu_path)
|
| 350 |
+
else:
|
| 351 |
+
headers = include_names = ()
|
| 352 |
+
major_version, minor_version = _get_nvrtc_version()
|
| 353 |
+
if major_version >= 12:
|
| 354 |
+
# Starting with CUDA 12.0, even without using jitify, some
|
| 355 |
+
# tests cause an error if the following option is not included.
|
| 356 |
+
options += ('--device-as-default-execution-space',)
|
| 357 |
+
|
| 358 |
+
prog = _NVRTCProgram(source, cu_path, headers, include_names,
|
| 359 |
+
name_expressions=name_expressions, method=method)
|
| 360 |
+
try:
|
| 361 |
+
compiled_obj, mapping = prog.compile(options, log_stream)
|
| 362 |
+
except CompileException as e:
|
| 363 |
+
dump = _get_bool_env_variable(
|
| 364 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 365 |
+
if dump:
|
| 366 |
+
e.dump(sys.stderr)
|
| 367 |
+
raise
|
| 368 |
+
return compiled_obj, mapping
|
| 369 |
+
|
| 370 |
+
if not cache_in_memory:
|
| 371 |
+
with tempfile.TemporaryDirectory() as root_dir:
|
| 372 |
+
cu_path = os.path.join(root_dir, filename)
|
| 373 |
+
|
| 374 |
+
with open(cu_path, 'w') as cu_file:
|
| 375 |
+
cu_file.write(source)
|
| 376 |
+
|
| 377 |
+
return _compile(source, options, cu_path,
|
| 378 |
+
name_expressions, log_stream, jitify)
|
| 379 |
+
else:
|
| 380 |
+
cu_path = '' if not jitify else filename
|
| 381 |
+
return _compile(source, options, cu_path, name_expressions,
|
| 382 |
+
log_stream, jitify)
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def compile_using_nvcc(source, options=(), arch=None,
|
| 386 |
+
filename='kern.cu', code_type='cubin',
|
| 387 |
+
separate_compilation=False, log_stream=None):
|
| 388 |
+
# defer import to here to avoid circular dependency
|
| 389 |
+
from cupy.cuda import get_nvcc_path
|
| 390 |
+
|
| 391 |
+
if not arch:
|
| 392 |
+
arch = _get_arch()
|
| 393 |
+
|
| 394 |
+
if code_type not in ('cubin', 'ptx'):
|
| 395 |
+
raise ValueError('Invalid code_type %s. Should be cubin or ptx')
|
| 396 |
+
if code_type == 'ptx':
|
| 397 |
+
assert not separate_compilation
|
| 398 |
+
|
| 399 |
+
arch_str = '-gencode=arch=compute_{cc},code=sm_{cc}'.format(cc=arch)
|
| 400 |
+
_nvcc = get_nvcc_path()
|
| 401 |
+
# split() is needed because _nvcc could come from the env var NVCC
|
| 402 |
+
cmd = _nvcc.split()
|
| 403 |
+
cmd.append(arch_str)
|
| 404 |
+
|
| 405 |
+
with tempfile.TemporaryDirectory() as root_dir:
|
| 406 |
+
first_part = filename.split('.')[0]
|
| 407 |
+
|
| 408 |
+
path = os.path.join(root_dir, first_part)
|
| 409 |
+
cu_path = '%s.cu' % path
|
| 410 |
+
result_path = '%s.%s' % (path, code_type)
|
| 411 |
+
|
| 412 |
+
with open(cu_path, 'w') as cu_file:
|
| 413 |
+
cu_file.write(source)
|
| 414 |
+
|
| 415 |
+
if not separate_compilation: # majority cases
|
| 416 |
+
cmd.append('--%s' % code_type)
|
| 417 |
+
cmd += list(options)
|
| 418 |
+
cmd.append(cu_path)
|
| 419 |
+
|
| 420 |
+
try:
|
| 421 |
+
_run_cc(cmd, root_dir, 'nvcc', log_stream)
|
| 422 |
+
except NVCCException as e:
|
| 423 |
+
cex = CompileException(str(e), source, cu_path, options,
|
| 424 |
+
'nvcc')
|
| 425 |
+
|
| 426 |
+
dump = _get_bool_env_variable(
|
| 427 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 428 |
+
if dump:
|
| 429 |
+
cex.dump(sys.stderr)
|
| 430 |
+
|
| 431 |
+
raise cex
|
| 432 |
+
else: # two steps: compile to object and device-link
|
| 433 |
+
cmd_partial = cmd.copy()
|
| 434 |
+
cmd_partial.append('--cubin')
|
| 435 |
+
|
| 436 |
+
obj = path + '.o'
|
| 437 |
+
cmd += list(options + ('-o', obj))
|
| 438 |
+
cmd.append(cu_path)
|
| 439 |
+
|
| 440 |
+
try:
|
| 441 |
+
_run_cc(cmd, root_dir, 'nvcc', log_stream)
|
| 442 |
+
except NVCCException as e:
|
| 443 |
+
cex = CompileException(str(e), source, cu_path, options,
|
| 444 |
+
'nvcc')
|
| 445 |
+
|
| 446 |
+
dump = _get_bool_env_variable(
|
| 447 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 448 |
+
if dump:
|
| 449 |
+
cex.dump(sys.stderr)
|
| 450 |
+
|
| 451 |
+
raise cex
|
| 452 |
+
|
| 453 |
+
options = _remove_rdc_option(options)
|
| 454 |
+
options += ('--device-link', obj, '-o', path + '.cubin')
|
| 455 |
+
cmd = cmd_partial + list(options)
|
| 456 |
+
|
| 457 |
+
try:
|
| 458 |
+
_run_cc(cmd, root_dir, 'nvcc', log_stream)
|
| 459 |
+
except NVCCException as e:
|
| 460 |
+
cex = CompileException(str(e), '', '', options, 'nvcc')
|
| 461 |
+
raise cex
|
| 462 |
+
|
| 463 |
+
if code_type == 'ptx':
|
| 464 |
+
with open(result_path, 'rb') as ptx_file:
|
| 465 |
+
return ptx_file.read()
|
| 466 |
+
elif code_type == 'cubin':
|
| 467 |
+
with open(result_path, 'rb') as bin_file:
|
| 468 |
+
return bin_file.read()
|
| 469 |
+
else:
|
| 470 |
+
assert False, code_type
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def _preprocess(source, options, arch, backend):
|
| 474 |
+
if backend == 'nvrtc':
|
| 475 |
+
# For the preprocess it is enough to use PTX method
|
| 476 |
+
# we don't need to explicitly obtain a CUBIN file.
|
| 477 |
+
options += ('-arch=compute_{}'.format(arch),)
|
| 478 |
+
prog = _NVRTCProgram(source)
|
| 479 |
+
try:
|
| 480 |
+
result, _ = prog.compile(options)
|
| 481 |
+
except CompileException as e:
|
| 482 |
+
dump = _get_bool_env_variable(
|
| 483 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 484 |
+
if dump:
|
| 485 |
+
e.dump(sys.stderr)
|
| 486 |
+
raise
|
| 487 |
+
elif backend == 'nvcc':
|
| 488 |
+
try:
|
| 489 |
+
result = compile_using_nvcc(source, options, arch, 'preprocess.cu',
|
| 490 |
+
code_type='ptx')
|
| 491 |
+
except CompileException as e:
|
| 492 |
+
dump = _get_bool_env_variable(
|
| 493 |
+
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
|
| 494 |
+
if dump:
|
| 495 |
+
e.dump(sys.stderr)
|
| 496 |
+
raise
|
| 497 |
+
else:
|
| 498 |
+
raise ValueError('Invalid backend %s' % backend)
|
| 499 |
+
|
| 500 |
+
assert isinstance(result, bytes)
|
| 501 |
+
|
| 502 |
+
# Extract the part containing version information.
|
| 503 |
+
return '\n'.join(
|
| 504 |
+
x for x in result.decode().splitlines() if x.startswith('//'))
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def get_cache_dir():
|
| 511 |
+
return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
_empty_file_preprocess_cache: dict = {}
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def _compile_module_with_cache(
|
| 518 |
+
source, options=(), arch=None, cache_dir=None, extra_source=None,
|
| 519 |
+
backend='nvrtc', *, enable_cooperative_groups=False,
|
| 520 |
+
name_expressions=None, log_stream=None, jitify=False):
|
| 521 |
+
|
| 522 |
+
if enable_cooperative_groups:
        if runtime.is_hip:
            raise ValueError(
                'Cooperative groups is not supported in HIP.')

    if name_expressions is not None and backend != 'nvrtc':
        raise NotImplementedError

    # We silently ignore CUPY_CACHE_IN_MEMORY if nvcc/hipcc are in use,
    # because they must dump files to disk.
    cache_in_memory = (
        _get_bool_env_variable('CUPY_CACHE_IN_MEMORY', False)
        and backend == 'nvrtc')

    if runtime.is_hip:
        backend = 'hiprtc' if backend == 'nvrtc' else 'hipcc'
        return _compile_with_cache_hip(
            source, options, arch, cache_dir, extra_source, backend,
            name_expressions, log_stream, cache_in_memory)
    else:
        return _compile_with_cache_cuda(
            source, options, arch, cache_dir, extra_source, backend,
            enable_cooperative_groups, name_expressions, log_stream,
            cache_in_memory, jitify)


def _compile_with_cache_cuda(
        source, options, arch, cache_dir, extra_source=None, backend='nvrtc',
        enable_cooperative_groups=False, name_expressions=None,
        log_stream=None, cache_in_memory=False, jitify=False):
    # NVRTC does not use extra_source. extra_source is used for cache key.
    global _empty_file_preprocess_cache
    if cache_dir is None:
        cache_dir = get_cache_dir()
    if arch is None:
        arch = _get_arch()

    options += ('-ftz=true',)

    if enable_cooperative_groups:
        # `cooperative_groups` requires relocatable device code.
        options += ('--device-c',)

    if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):
        options += ('--device-debug', '--generate-line-info')

    is_jitify_requested = ('-DCUPY_USE_JITIFY' in options)
    if jitify and not is_jitify_requested:
        # jitify is set in RawKernel/RawModule, translate it to an option
        # that is useless to the compiler, but can be used as part of the
        # hash key
        options += ('-DCUPY_USE_JITIFY',)
    elif is_jitify_requested and not jitify:
        # jitify is requested internally, just set the flag
        jitify = True
    if jitify and backend != 'nvrtc':
        raise ValueError('jitify only works with NVRTC')

    options += _get_extra_include_dir_opts()
    env = ((arch, options, _get_nvrtc_version(), backend)
           + _get_arch_for_options_for_nvrtc(arch))
    base = _empty_file_preprocess_cache.get(env, None)
    if base is None:
        # This is for checking NVRTC/NVCC compiler internal version
        base = _preprocess('', options, arch, backend)
        _empty_file_preprocess_cache[env] = base

    key_src = '%s %s %s %s %s' % (
        env, base, source, extra_source, _get_cupy_cache_key())
    key_src = key_src.encode('utf-8')
    name = _hash_hexdigest(key_src) + '.cubin'

    mod = function.Module()

    if not cache_in_memory:
        # Read from disk cache
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir, exist_ok=True)

        # To handle conflicts in a concurrent situation, we adopt a
        # lock-free method to avoid performance degradation.
        # We force recompiling to retrieve C++ mangled names if so desired.
        path = os.path.join(cache_dir, name)
        if os.path.exists(path) and not name_expressions:
            with open(path, 'rb') as file:
                data = file.read()
            if len(data) >= _hash_length:
                hash = data[:_hash_length]
                cubin = data[_hash_length:]
                cubin_hash = _hash_hexdigest(cubin).encode('ascii')
                if hash == cubin_hash:
                    mod.load(cubin)
                    return mod
    else:
        # Enforce compiling -- the resulting kernel will be cached elsewhere,
        # so we do nothing
        pass

    if backend == 'nvrtc':
        cu_name = '' if cache_in_memory else name + '.cu'
        ptx, mapping = compile_using_nvrtc(
            source, options, arch, cu_name, name_expressions,
            log_stream, cache_in_memory, jitify)
        if _is_cudadevrt_needed(options):
            # for separate compilation
            ls = function.LinkState()
            ls.add_ptr_data(ptx, 'cupy.ptx')
            _cudadevrt = _get_cudadevrt_path()
            ls.add_ptr_file(_cudadevrt)
            cubin = ls.complete()
        else:
            cubin = ptx
        mod._set_mapping(mapping)
    elif backend == 'nvcc':
        rdc = _is_cudadevrt_needed(options)
        cubin = compile_using_nvcc(source, options, arch,
                                   name + '.cu', code_type='cubin',
                                   separate_compilation=rdc,
                                   log_stream=log_stream)
    else:
        raise ValueError('Invalid backend %s' % backend)

    if not cache_in_memory:
        # Write to disk cache
        cubin_hash = _hash_hexdigest(cubin).encode('ascii')

        # shutil.move is not an atomic operation, so it could result in a
        # corrupted file. We detect it by prepending a hash to each cache
        # file. If the file is corrupted, it will be ignored next time it
        # is read.
        with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
            tf.write(cubin_hash)
            tf.write(cubin)
            temp_path = tf.name
        shutil.move(temp_path, path)

        # Save .cu source file along with .cubin
        if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
            with open(path + '.cu', 'w') as f:
                f.write(source)
    else:
        # we don't do any disk I/O
        pass

    mod.load(cubin)
    return mod


class CompileException(Exception):

    def __init__(self, msg, source, name, options, backend='nvrtc'):
        self._msg = msg
        self.source = source
        self.name = name
        self.options = options
        self.backend = backend
        super(CompileException, self).__init__()

    def __reduce__(self):
        return (type(self), (self._msg, self.source, self.name,
                             self.options, self.backend))

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.get_message()

    def get_message(self):
        return self._msg

    def dump(self, f):
        lines = self.source.split('\n')
        digits = int(math.floor(math.log10(len(lines)))) + 1
        linum_fmt = '{{:0{}d}} '.format(digits)
        f.write('{} '.format(self.backend.upper()))
        f.write('compilation error: {}\n'.format(self))
        f.write('-----\n')
        f.write('Name: {}\n'.format(self.name))
        f.write('Options: {}\n'.format(' '.join(self.options)))
        f.write('CUDA source:\n')
        for i, line in enumerate(lines):
            f.write(linum_fmt.format(i + 1) + line.rstrip() + '\n')
        f.write('-----\n')
        f.flush()


class _NVRTCProgram(object):

    def __init__(self, src, name='default_program', headers=(),
                 include_names=(), name_expressions=None, method='ptx'):
        self.ptr = None

        if isinstance(src, bytes):
            src = src.decode('UTF-8')
        if isinstance(name, bytes):
            name = name.decode('UTF-8')

        self.src = src
        self.name = name
        self.ptr = nvrtc.createProgram(src, name, headers, include_names)
        self.name_expressions = name_expressions
        self.method = method

    def __del__(self, is_shutting_down=_util.is_shutting_down):
        if is_shutting_down():
            return
        if self.ptr:
            nvrtc.destroyProgram(self.ptr)

    def compile(self, options=(), log_stream=None):
        try:
            if self.name_expressions:
                for ker in self.name_expressions:
                    nvrtc.addNameExpression(self.ptr, ker)
            nvrtc.compileProgram(self.ptr, options)
            mapping = None
            if self.name_expressions:
                mapping = {}
                for ker in self.name_expressions:
                    mapping[ker] = nvrtc.getLoweredName(self.ptr, ker)
            if log_stream is not None:
                log_stream.write(nvrtc.getProgramLog(self.ptr))
            # This is to ensure backwards compatibility with nvrtc
            if self.method == 'cubin':
                return nvrtc.getCUBIN(self.ptr), mapping
            elif self.method == 'ptx':
                return nvrtc.getPTX(self.ptr), mapping
            # TODO(leofang): support JIT LTO using nvrtc.getNVVM()?
            # need -dlto and -arch=compute_XX
            else:
                raise RuntimeError('Unknown NVRTC compile method')
        except nvrtc.NVRTCError:
            log = nvrtc.getProgramLog(self.ptr)
            raise CompileException(log, self.src, self.name, options,
                                   'nvrtc' if not runtime.is_hip
                                   else 'hiprtc')


def is_valid_kernel_name(name):
    return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None


def compile_using_hipcc(source, options, arch, log_stream=None):
    # As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the
    # right arch without setting HCC_AMDGPU_TARGET, so we don't need
    # to set arch here
    cmd = ['hipcc', '--genco'] + list(options)

    with tempfile.TemporaryDirectory() as root_dir:
        path = os.path.join(root_dir, 'kern')
        in_path = path + '.cpp'
        out_path = path + '.hsaco'

        with open(in_path, 'w') as f:
            f.write(source)

        cmd += [in_path, '-o', out_path]

        try:
            output = _run_cc(cmd, root_dir, 'hipcc', log_stream)
        except HIPCCException as e:
            cex = CompileException(str(e), source, in_path, options,
                                   'hipcc')

            dump = _get_bool_env_variable(
                'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
            if dump:
                cex.dump(sys.stderr)

            raise cex
        if not os.path.isfile(out_path):
            raise HIPCCException(
                '`hipcc` command does not generate output file. \n'
                'command: {0}\n'
                'stdout/stderr: \n'
                '{1}'.format(cmd, output))
        with open(out_path, 'rb') as f:
            return f.read()


# TODO(leofang): consider merging _preprocess_hipcc with _preprocess_hiprtc,
# perhaps also with _preprocess?
def _preprocess_hipcc(source, options):
    cmd = ['hipcc', '--preprocess'] + list(options)
    with tempfile.TemporaryDirectory() as root_dir:
        path = os.path.join(root_dir, 'kern')
        cu_path = '%s.cpp' % path

        with open(cu_path, 'w') as cu_file:
            cu_file.write(source)

        cmd.append(cu_path)
        pp_src = _run_cc(cmd, root_dir, 'hipcc')
        assert isinstance(pp_src, str)
        return re.sub('(?m)^#.*$', '', pp_src)


def _preprocess_hiprtc(source, options):
    # source is ignored
    if _cuda_hip_version >= 40400000:
        # HIP runtime headers can no longer be explicitly included on
        # ROCm 4.5+
        code = '''
        // hiprtc segfaults if the input code is empty
        __global__ void _cupy_preprocess_dummy_kernel_() { }
        '''
    else:
        code = '''
        // hiprtc segfaults if the input code is empty
        #include <hip/hip_runtime.h>
        __global__ void _cupy_preprocess_dummy_kernel_() { }
        '''

    prog = _NVRTCProgram(code)
    try:
        result, _ = prog.compile(options)
    except CompileException as e:
        dump = _get_bool_env_variable(
            'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
        if dump:
            e.dump(sys.stderr)
        raise
    assert isinstance(result, bytes)
    return result


_hip_extra_source = None


def _convert_to_hip_source(source, extra_source, is_hiprtc):
    if not is_hiprtc:
        return '#include <hip/hip_runtime.h>\n' + source
    if _cuda_hip_version >= 40400000:
        # HIP runtime headers can no longer be explicitly included on
        # ROCm 4.5+
        return source
    if _cuda_hip_version >= 402:
        # "-I" is fixed on ROCm 4.2.0+
        return '#include <hip/hip_runtime.h>\n' + source

    # Workaround for hiprtc: it does not follow the -I option to search
    # headers (as of ROCm 3.5.0), so we must prepend all CuPy's headers
    global _hip_extra_source
    if _hip_extra_source is None:
        if extra_source is not None:
            extra_source = extra_source.split('\n')
            extra_source = [line for line in extra_source if (
                not line.startswith('#include')
                and not line.startswith('#pragma once'))]
            _hip_extra_source = extra_source = '\n'.join(extra_source)

    source = source.split('\n')
    source = [line for line in source if not line.startswith('#include')]
    source = ('#include <hip/hip_runtime.h>\n#include <hip/hip_fp16.h>\n'
              + _hip_extra_source + '\n'.join(source))

    return source


# TODO(leofang): evaluate if this can be merged with _compile_with_cache_cuda()
def _compile_with_cache_hip(source, options, arch, cache_dir, extra_source,
                            backend='hiprtc', name_expressions=None,
                            log_stream=None, cache_in_memory=False,
                            use_converter=True):
    global _empty_file_preprocess_cache

    # TODO(leofang): this might be possible but is currently undocumented
    if _is_cudadevrt_needed(options):
        raise ValueError('separate compilation is not supported in HIP')

    # HIP's equivalent of -ftz=true, see ROCm-Developer-Tools/HIP#2252
    # Notes:
    # - For hipcc, this should just work, as invalid options would cause
    #   errors. See https://clang.llvm.org/docs/ClangCommandLineReference.html.
    # - For hiprtc, this is a no-op until the compiler options like -D and -I
    #   are accepted, see ROCm-Developer-Tools/HIP#2182 and
    #   ROCm-Developer-Tools/HIP#2248
    options += ('-fcuda-flush-denormals-to-zero',)

    # Workaround ROCm 4.3 LLVM_PATH issue in hipRTC #5689
    rocm_build_version = driver.get_build_version()
    if rocm_build_version >= 40300000 and rocm_build_version < 40500000:
        options += (
            '-I' + get_rocm_path() + '/llvm/lib/clang/13.0.0/include/',)

    if cache_dir is None:
        cache_dir = get_cache_dir()
    # As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the
    # right arch without setting HCC_AMDGPU_TARGET, so we don't need
    # to tell the compiler which arch we are targeting. But, we still
    # need to know arch as part of the cache key:
    if arch is None:
        # On HIP, gcnArch is computed from "compute capability":
        # https://github.com/ROCm-Developer-Tools/HIP/blob/rocm-4.0.0/rocclr/hip_device.cpp#L202
        arch = device.Device().compute_capability
    if use_converter:
        source = _convert_to_hip_source(source, extra_source,
                                        is_hiprtc=(backend == 'hiprtc'))

    env = (arch, options, _get_nvrtc_version(), backend)
    base = _empty_file_preprocess_cache.get(env, None)
    if base is None:
        # This is for checking HIPRTC/HIPCC compiler internal version
        if backend == 'hiprtc':
            base = _preprocess_hiprtc('', options)
        else:
            base = _preprocess_hipcc('', options)
        _empty_file_preprocess_cache[env] = base

    key_src = '%s %s %s %s' % (env, base, source, extra_source)
    key_src = key_src.encode('utf-8')
    name = _hash_hexdigest(key_src) + '.hsaco'

    mod = function.Module()

    if not cache_in_memory:
        # Read from disk cache
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir, exist_ok=True)

        # To handle conflicts in a concurrent situation, we adopt a
        # lock-free method to avoid performance degradation.
        # We force recompiling to retrieve C++ mangled names if so desired.
        path = os.path.join(cache_dir, name)
        if os.path.exists(path) and not name_expressions:
            with open(path, 'rb') as f:
                data = f.read()
            if len(data) >= _hash_length:
                hash_value = data[:_hash_length]
                binary = data[_hash_length:]
                binary_hash = _hash_hexdigest(binary).encode('ascii')
                if hash_value == binary_hash:
                    mod.load(binary)
                    return mod
    else:
        # Enforce compiling -- the resulting kernel will be cached elsewhere,
        # so we do nothing
        pass

    if backend == 'hiprtc':
        # compile_using_nvrtc calls hiprtc for hip builds
        binary, mapping = compile_using_nvrtc(
            source, options, arch, name + '.cu', name_expressions,
            log_stream, cache_in_memory)
        mod._set_mapping(mapping)
    else:
        binary = compile_using_hipcc(source, options, arch, log_stream)

    if not cache_in_memory:
        # Write to disk cache
        binary_hash = _hash_hexdigest(binary).encode('ascii')

        # shutil.move is not an atomic operation, so it could result in a
        # corrupted file. We detect it by prepending a hash to each cache
        # file. If the file is corrupted, it will be ignored next time it
        # is read.
        with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
            tf.write(binary_hash)
            tf.write(binary)
            temp_path = tf.name
        shutil.move(temp_path, path)

        # Save .cu source file along with .hsaco
        if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
            with open(path + '.cpp', 'w') as f:
                f.write(source)
    else:
        # we don't do any disk I/O
        pass

    mod.load(binary)
    return mod
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cudnn.py
ADDED
@@ -0,0 +1,17 @@
"""
cuDNN Wrapper

Use `cupy_backends.cuda.libs.cudnn` directly in the CuPy codebase.
"""

from cupy import _environment


available = True

try:
    _environment._preload_library('cudnn')
    from cupy_backends.cuda.libs.cudnn import *  # NOQA
except ImportError as e:
    available = False
    _environment._preload_warning('cudnn', e)
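
Because the wrapper degrades to `available = False` instead of raising at import time, callers can feature-detect cuDNN. A small illustrative guard (the fallback label and the `getVersion()` re-export are assumptions about the backend module, not guaranteed by the file above):

```python
from cupy.cuda import cudnn


def conv_backend():
    # Prefer cuDNN when the preload succeeded; otherwise fall back.
    if cudnn.available:
        return 'cudnn (version %d)' % cudnn.getVersion()
    return 'plain fallback'
```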
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cufft.pxd
ADDED
@@ -0,0 +1,99 @@
# Note: nothing exposed in this pxd header is considered public API;
# we copy this to sdist only because it's needed at runtime to support
# cuFFT callbacks

from libc.stdint cimport intptr_t

cdef extern from *:
    ctypedef float Float 'cufftReal'
    ctypedef double Double 'cufftDoubleReal'
    ctypedef int Result 'cufftResult_t'

    IF CUPY_HIP_VERSION > 0:
        ctypedef int Handle 'cufftHandle'
    ELSE:
        ctypedef struct hipHandle 'hipfftHandle_t':
            pass
        ctypedef hipHandle* Handle 'cufftHandle'

    ctypedef enum Type 'cufftType_t':
        pass


cpdef enum:
    CUFFT_C2C = 0x29
    CUFFT_R2C = 0x2a
    CUFFT_C2R = 0x2c
    CUFFT_Z2Z = 0x69
    CUFFT_D2Z = 0x6a
    CUFFT_Z2D = 0x6c

    CUFFT_FORWARD = -1
    CUFFT_INVERSE = 1

    CUFFT_CB_LD_COMPLEX = 0x0,
    CUFFT_CB_LD_COMPLEX_DOUBLE = 0x1,
    CUFFT_CB_LD_REAL = 0x2,
    CUFFT_CB_LD_REAL_DOUBLE = 0x3,
    CUFFT_CB_ST_COMPLEX = 0x4,
    CUFFT_CB_ST_COMPLEX_DOUBLE = 0x5,
    CUFFT_CB_ST_REAL = 0x6,
    CUFFT_CB_ST_REAL_DOUBLE = 0x7,


cpdef get_current_plan()
cpdef int getVersion() except? -1


cdef class Plan1d:
    cdef:
        readonly intptr_t handle
        readonly object work_area  # can be MemoryPointer or a list of it
        readonly int nx
        readonly int batch
        readonly Type fft_type

        readonly list gpus
        list batch_share
        list gather_streams
        list gather_events
        dict scatter_streams
        dict scatter_events
        intptr_t xtArr
        list xtArr_buffer

        void _single_gpu_get_plan(
            self, Handle plan, int nx, int fft_type, int batch) except*
        void _multi_gpu_get_plan(
            self, Handle plan, int nx, int fft_type, int batch,
            devices, out) except*


cdef class PlanNd:
    cdef:
        readonly intptr_t handle
        readonly object work_area  # memory.MemoryPointer
        readonly tuple shape
        readonly Type fft_type
        readonly str order
        readonly int last_axis
        readonly object last_size

        # TODO(leofang): support multi-GPU transforms
        readonly list gpus


cdef class XtPlanNd:
    cdef:
        readonly intptr_t handle
        readonly object work_area  # memory.MemoryPointer
        readonly tuple shape
        readonly int itype
        readonly int otype
        readonly int etype
        readonly str order
        readonly int last_axis
        readonly object last_size

        # TODO(leofang): support multi-GPU transforms
        readonly list gpus
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cufft.pyx
ADDED
@@ -0,0 +1,1205 @@
cimport cython  # NOQA
from cpython.mem cimport PyMem_Malloc, PyMem_Free
from libc.string cimport memset as c_memset
from libcpp cimport vector

import numpy
import threading

import cupy
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import runtime
from cupy.cuda import stream


cdef object _thread_local = threading.local()


cpdef get_current_plan():
    """Get current cuFFT plan.

    Returns:
        None or cupy.cuda.cufft.Plan1d or cupy.cuda.cufft.PlanNd
    """
    if not hasattr(_thread_local, '_current_plan'):
        _thread_local._current_plan = None
    return _thread_local._current_plan


cdef enum:
    # Actually, this is 64, but it's undocumented. For the sake
    # of safety, let us use 16, which agrees with the cuFFT doc.
    MAX_CUDA_DESCRIPTOR_GPUS = 16


cdef extern from 'cupy_cufft.h' nogil:
    # we duplicate some types here to avoid cimporting from driver/runtime,
    # as we don't include their .pxd files in the sdist
    ctypedef void* Stream 'cudaStream_t'
    ctypedef int DataType 'cudaDataType'

    ctypedef struct Complex 'cufftComplex':
        float x, y
    ctypedef struct DoubleComplex 'cufftDoubleComplex':
        double x, y

    # cuFFT Helper Function
    Result cufftCreate(Handle *plan)
    Result cufftDestroy(Handle plan)
    Result cufftSetAutoAllocation(Handle plan, int autoAllocate)
    Result cufftSetWorkArea(Handle plan, void *workArea)

    # cuFFT Stream Function
    Result cufftSetStream(Handle plan, Stream streamId)

    # cuFFT Plan Functions
    Result cufftMakePlan1d(Handle plan, int nx, Type type, int batch,
                           size_t *workSize)
    Result cufftMakePlanMany(Handle plan, int rank, int *n, int *inembed,
                             int istride, int idist, int *onembed, int ostride,
                             int odist, Type type, int batch,
                             size_t *workSize)

    # cuFFT Exec Function
    Result cufftExecC2C(Handle plan, Complex *idata, Complex *odata,
                        int direction)
    Result cufftExecR2C(Handle plan, Float *idata, Complex *odata)
    Result cufftExecC2R(Handle plan, Complex *idata, Float *odata)
    Result cufftExecZ2Z(Handle plan, DoubleComplex *idata,
                        DoubleComplex *odata, int direction)
    Result cufftExecD2Z(Handle plan, Double *idata, DoubleComplex *odata)
    Result cufftExecZ2D(Handle plan, DoubleComplex *idata, Double *odata)

    # Version
    Result cufftGetVersion(int* version)

    # cufftXt data types
    ctypedef struct XtArrayDesc 'cudaXtDesc':
        int version
        int nGPUs
        int GPUs[MAX_CUDA_DESCRIPTOR_GPUS]
        void* data[MAX_CUDA_DESCRIPTOR_GPUS]
        size_t size[MAX_CUDA_DESCRIPTOR_GPUS]
        void* cudaXtState

    ctypedef enum XtSubFormat 'cufftXtSubFormat':
        CUFFT_XT_FORMAT_INPUT
        CUFFT_XT_FORMAT_OUTPUT
        CUFFT_XT_FORMAT_INPLACE
        CUFFT_XT_FORMAT_INPLACE_SHUFFLED
        CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED

    ctypedef struct XtArray 'cudaLibXtDesc':
        int version
        XtArrayDesc* descriptor
        int library
        XtSubFormat subFormat
        void* libDescriptor

    ctypedef enum XtCopyType 'cufftXtCopyType':
        CUFFT_COPY_HOST_TO_DEVICE = 0x00
        CUFFT_COPY_DEVICE_TO_HOST = 0x01
        CUFFT_COPY_DEVICE_TO_DEVICE = 0x02

    # cufftXt functions
    Result cufftXtSetGPUs(Handle plan, int nGPUs, int* gpus)
    Result cufftXtSetWorkArea(Handle plan, void** workArea)
    Result cufftXtMemcpy(Handle plan, void *dst, void *src, XtCopyType type)
    Result cufftXtExecDescriptorC2C(Handle plan, XtArray* idata,
                                    XtArray* odata, int direction)
    Result cufftXtExecDescriptorZ2Z(Handle plan, XtArray* idata,
                                    XtArray* odata, int direction)
    Result cufftXtMakePlanMany(Handle plan, int rank, long long int* n,
                               long long int* inembed,
                               long long int istride,
                               long long int idist,
                               DataType inputtype,
                               long long int* onembed,
                               long long int ostride,
                               long long int odist,
                               DataType outputtype,
                               long long int batch, size_t* workSize,
                               DataType executiontype)
    Result cufftXtExec(Handle plan, void* inarr, void* outarr, int d)


IF CUPY_CUFFT_STATIC:
    # cuFFT callback
    cdef extern from 'cupy_cufftXt.h' nogil:
        ctypedef enum callbackType 'cufftXtCallbackType':
            pass
        Result set_callback(Handle, callbackType, bint, void**)


cdef dict RESULT = {
    0: 'CUFFT_SUCCESS',
    1: 'CUFFT_INVALID_PLAN',
    2: 'CUFFT_ALLOC_FAILED',
    3: 'CUFFT_INVALID_TYPE',
    4: 'CUFFT_INVALID_VALUE',
    5: 'CUFFT_INTERNAL_ERROR',
    6: 'CUFFT_EXEC_FAILED',
    7: 'CUFFT_SETUP_FAILED',
    8: 'CUFFT_INVALID_SIZE',
    9: 'CUFFT_UNALIGNED_DATA',
    10: 'CUFFT_INCOMPLETE_PARAMETER_LIST',
    11: 'CUFFT_INVALID_DEVICE',
    12: 'CUFFT_PARSE_ERROR',
    13: 'CUFFT_NO_WORKSPACE',
    14: 'CUFFT_NOT_IMPLEMENTED',
    15: 'CUFFT_LICENSE_ERROR',
    16: 'CUFFT_NOT_SUPPORTED',
}


class CuFFTError(RuntimeError):

    def __init__(self, int result):
        self.result = result
        super(CuFFTError, self).__init__('%s' % (RESULT[result]))

    def __reduce__(self):
        return (type(self), (self.result,))


@cython.profile(False)
cpdef inline void check_result(int result) except *:
    if result != 0:
        raise CuFFTError(result)


cpdef int getVersion() except? -1:
    cdef int version, result
    result = cufftGetVersion(&version)
    check_result(result)
    return version
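
`CuFFTError` above defines `__reduce__` so instances survive pickling, which matters when errors cross process boundaries (e.g. multiprocessing workers). A quick check of that behavior, assuming a CuPy build with cuFFT support:

```python
import pickle

from cupy.cuda.cufft import CuFFTError

err = CuFFTError(1)  # 1 maps to 'CUFFT_INVALID_PLAN' in RESULT above
err2 = pickle.loads(pickle.dumps(err))
assert err2.result == 1
assert str(err2) == 'CUFFT_INVALID_PLAN'
```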

# This is necessary for single-batch transforms: "when batch is one, data is
# left in the GPU memory in a permutation of the natural output", see
# https://docs.nvidia.com/cuda/cufft/index.html#multiple-GPU-cufft-intermediate-helper  # NOQA
cdef _reorder_buffers(Handle plan, intptr_t xtArr, list xtArr_buffer):
    cdef int i, result, nGPUs
    cdef intptr_t temp_xtArr
    cdef XtArray* temp_arr
    cdef XtArray* arr
    cdef list gpus = []
    cdef list sizes = []
    cdef list temp_xtArr_buffer

    arr = <XtArray*>xtArr
    nGPUs = len(xtArr_buffer)
    assert nGPUs == arr.descriptor.nGPUs

    # allocate another buffer to prepare for order conversion
    for i in range(nGPUs):
        gpus.append(arr.descriptor.GPUs[i])
        sizes.append(arr.descriptor.size[i])
    temp_xtArr, temp_xtArr_buffer = _XtMalloc(gpus, sizes,
                                              CUFFT_XT_FORMAT_INPLACE)
    temp_arr = <XtArray*>temp_xtArr

    # Make a device copy to bring the data from the permuted order back to
    # the natural order. Note that this works because after FFT
    # arr.subFormat is silently changed to CUFFT_XT_FORMAT_INPLACE_SHUFFLED
    with nogil:
        result = cufftXtMemcpy(plan, <void*>temp_arr, <void*>arr,
                               CUFFT_COPY_DEVICE_TO_DEVICE)
    check_result(result)

    for i in range(nGPUs):
        # swap MemoryPointer in xtArr_buffer
        temp = temp_xtArr_buffer[i]
        temp_xtArr_buffer[i] = xtArr_buffer[i]
        xtArr_buffer[i] = temp

        # swap pointer in xtArr
        arr.descriptor.data[i] = temp_arr.descriptor.data[i]
        assert arr.descriptor.size[i] == temp_arr.descriptor.size[i]
        temp_arr.descriptor.data[i] = NULL
        temp_arr.descriptor.size[i] = 0

    # temp_xtArr now points to the old data, which is now in temp_xtArr_buffer
    # and will be deallocated after this line (out of scope)
    _XtFree(temp_xtArr)


# This is meant to replace cufftXtMalloc().
# We need to manage the buffers ourselves in order to 1. avoid excessive,
# unnecessary memory usage, and 2. use CuPy's memory pool.
cdef _XtMalloc(list gpus, list sizes, XtSubFormat fmt):
    cdef XtArrayDesc* xtArr_desc
    cdef XtArray* xtArr
    cdef list xtArr_buffer = []
    cdef int i, nGPUs
    cdef size_t size

    nGPUs = len(gpus)
    assert nGPUs == len(sizes)
    xtArr_desc = <XtArrayDesc*>PyMem_Malloc(sizeof(XtArrayDesc))
    xtArr = <XtArray*>PyMem_Malloc(sizeof(XtArray))
    c_memset(xtArr_desc, 0, sizeof(XtArrayDesc))
    c_memset(xtArr, 0, sizeof(XtArray))

    xtArr_desc.nGPUs = nGPUs
    for i, (gpu, size) in enumerate(zip(gpus, sizes)):
        prev_device = runtime.getDevice()
        runtime.setDevice(gpu)
        try:
            buf = memory.alloc(size)
        finally:
            runtime.setDevice(prev_device)
        assert gpu == buf.device_id
        xtArr_buffer.append(buf)
        xtArr_desc.GPUs[i] = gpu
        xtArr_desc.data[i] = <void*><intptr_t>(buf.ptr)
        xtArr_desc.size[i] = size

    xtArr.descriptor = xtArr_desc
    xtArr.subFormat = fmt

    return <intptr_t>xtArr, xtArr_buffer


# This is meant to replace cufftXtFree().
# We only free the C structs. The underlying GPU buffers are deallocated when
# going out of scope.
cdef _XtFree(intptr_t ptr):
    cdef XtArray* xtArr = <XtArray*>ptr
    cdef XtArrayDesc* xtArr_desc = xtArr.descriptor
    PyMem_Free(xtArr_desc)
    PyMem_Free(xtArr)
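
The plan constructors below retry their `cufftMakePlan*` call once after emptying CuPy's memory pool when cuFFT reports `CUFFT_ALLOC_FAILED` (result code 2). A minimal sketch of that retry idiom, where `make_plan` is a hypothetical callable standing in for a raw `cufftMakePlan*` invocation:

```python
import cupy
from cupy.cuda.cufft import CuFFTError

CUFFT_ALLOC_FAILED = 2


def make_plan_with_retry(make_plan):
    # `make_plan` returns a raw cuFFT result code (0 == success).
    result = make_plan()
    if result == CUFFT_ALLOC_FAILED:
        # Return cached device blocks to the driver, then try once more.
        cupy.get_default_memory_pool().free_all_blocks()
        result = make_plan()
    if result != 0:
        raise CuFFTError(result)
```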

cdef class Plan1d:
    def __init__(self, int nx, int fft_type, int batch, *,
                 devices=None, out=None):
        cdef Handle plan
        cdef bint use_multi_gpus = 0 if devices is None else 1
        cdef int result

        self.handle = <intptr_t>0
        self.xtArr = <intptr_t>0  # pointer to metadata for multi-GPU buffer
        self.xtArr_buffer = None  # actual multi-GPU intermediate buffer

        with nogil:
            result = cufftCreate(&plan)
            if result == 0:
                result = cufftSetAutoAllocation(plan, 0)
        check_result(result)

        self.handle = <intptr_t>plan
        self.work_area = None
        self.gpus = None

        self.gather_streams = None
        self.gather_events = None
        self.scatter_streams = None
        self.scatter_events = None

        if batch != 0:
            # set plan, work_area, gpus, streams, and events
            if not use_multi_gpus:
                self._single_gpu_get_plan(plan, nx, fft_type, batch)
            else:
                self._multi_gpu_get_plan(
                    plan, nx, fft_type, batch, devices, out)
        else:
            if use_multi_gpus:
                # multi-GPU FFT cannot transform 0-size arrays, and attempting
                # to create such a plan will error out, but we still need this
                # for bookkeeping
                if isinstance(devices, (tuple, list)):
                    self.gpus = list(devices)
                elif isinstance(devices, int) and devices > 0:
                    self.gpus = [i for i in range(devices)]
                else:
                    raise ValueError

        self.nx = nx
        self.fft_type = <Type>fft_type
        self.batch = batch
        self.batch_share = None

    cdef void _single_gpu_get_plan(self, Handle plan, int nx, int fft_type,
                                   int batch) except*:
        cdef int result
        cdef size_t work_size
        cdef intptr_t ptr

        with nogil:
            result = cufftMakePlan1d(plan, nx, <Type>fft_type, batch,
                                     &work_size)

        # cufftMakePlan1d uses large memory when nx has a large divisor.
        # See https://github.com/cupy/cupy/issues/1063
        if result == 2:
            cupy.get_default_memory_pool().free_all_blocks()
            with nogil:
                result = cufftMakePlan1d(plan, nx, <Type>fft_type, batch,
                                         &work_size)
        check_result(result)

        work_area = memory.alloc(work_size)
        ptr = <intptr_t>(work_area.ptr)
        with nogil:
            result = cufftSetWorkArea(plan, <void*>(ptr))
        check_result(result)

        self.work_area = work_area  # this is for the cuFFT plan

    cdef void _multi_gpu_get_plan(self, Handle plan, int nx, int fft_type,
                                  int batch, devices, out) except*:
        cdef int nGPUs, min_len, result
        cdef vector.vector[int] gpus
        cdef vector.vector[size_t] work_size
        cdef list work_area = []
        cdef list gather_streams = []
        cdef list gather_events = []
        cdef vector.vector[void*] work_area_ptr

        # some sanity checks
        if runtime.is_hip:
            raise RuntimeError('hipFFT/rocFFT does not support multi-GPU FFT')
        if fft_type != CUFFT_C2C and fft_type != CUFFT_Z2Z:
            raise ValueError('Currently for multiple GPUs only C2C and Z2Z '
                             'are supported.')
        if isinstance(devices, (tuple, list)):
            nGPUs = len(devices)
            for i in range(nGPUs):
                gpus.push_back(devices[i])
        elif isinstance(devices, int):
            nGPUs = devices
            for i in range(nGPUs):
                gpus.push_back(i)
        else:
            raise ValueError('"devices" should be an int or an iterable '
                             'of int.')
        if batch == 1:
            if (nx & (nx - 1)) != 0:
                raise ValueError('For multi-GPU FFT with batch = 1, the array '
                                 'size must be a power of 2.')
            if nGPUs not in (2, 4, 8, 16):
                raise ValueError('For multi-GPU FFT with batch = 1, the number'
                                 ' of devices must be 2, 4, 8, or 16.')
            if nGPUs in (2, 4):
                min_len = 64
            elif nGPUs == 8:
                min_len = 128
            else:  # nGPUs == 16
                min_len = 1024
            if nx < min_len:
                raise ValueError('For {} GPUs, the array length must be at '
                                 'least {} (you have {}).'
                                 .format(nGPUs, min_len, nx))
        work_size.resize(nGPUs)

        with nogil:
            result = cufftXtSetGPUs(plan, nGPUs, gpus.data())
            if result == 0:
                result = cufftMakePlan1d(plan, nx, <Type>fft_type, batch,
                                         work_size.data())

        # cufftMakePlan1d uses large memory when nx has a large divisor.
        # See https://github.com/cupy/cupy/issues/1063
        if result == 2:
            cupy.get_default_memory_pool().free_all_blocks()
            with nogil:
                result = cufftMakePlan1d(plan, nx, <Type>fft_type, batch,
                                         work_size.data())
        check_result(result)

        for i in range(nGPUs):
            prev_device = runtime.getDevice()
            runtime.setDevice(gpus[i])
            try:
                buf = memory.alloc(work_size[i])
                s = stream.Stream()
                e = stream.Event()
            finally:
                runtime.setDevice(prev_device)
            work_area.append(buf)
            work_area_ptr.push_back(<void*><intptr_t>(buf.ptr))
            gather_streams.append(s)
            gather_events.append(e)
        with nogil:
            result = cufftXtSetWorkArea(plan, work_area_ptr.data())
        check_result(result)

        self.work_area = work_area  # this is for the cuFFT plan
        self.gpus = list(gpus)

        # For async, overlapped copies. We need to distinguish scatter and
        # gather because for async memcpy, the stream is on the source device
        self.gather_streams = gather_streams
        self.gather_events = gather_events
        self.scatter_streams = {}
        self.scatter_events = {}
        self._multi_gpu_get_scatter_streams_events(runtime.getDevice())

    def _multi_gpu_get_scatter_streams_events(self, int curr_device):
        '''
        create a list of streams and events on the current device
        '''
        cdef int i
        cdef list scatter_streams = []
        cdef list scatter_events = []

        assert curr_device in self.gpus
        prev_device = runtime.getDevice()
        runtime.setDevice(curr_device)
        try:
            for i in self.gpus:
                scatter_streams.append(stream.Stream())
                scatter_events.append(stream.Event())
        finally:
            runtime.setDevice(prev_device)
        self.scatter_streams[curr_device] = scatter_streams
        self.scatter_events[curr_device] = scatter_events

    def __dealloc__(self):
        cdef Handle plan = <Handle>self.handle
        cdef int dev, result

        if self.xtArr != 0:
            _XtFree(self.xtArr)
            self.xtArr = 0

        try:
            dev = runtime.getDevice()
        except Exception as e:
            # hack: the runtime module is purged at interpreter shutdown;
            # since this is not a __del__ method, we can't use
            # cupy._util.is_shutting_down()...
            return

        if plan != <Handle>0:
            with nogil:
                result = cufftDestroy(plan)
            check_result(result)
            self.handle = <intptr_t>0

        # cuFFT bug: after cufftDestroy(), the current device is mistakenly
        # set to the last device in self.gpus, so we must correct it. See
        # https://github.com/cupy/cupy/pull/2644#discussion_r347567899 and
        # NVIDIA internal ticket 2761341.
        runtime.setDevice(dev)

    def __enter__(self):
        _thread_local._current_plan = self
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _thread_local._current_plan = None

    def fft(self, a, out, direction):
        if self.gpus is not None:
            self._multi_gpu_fft(a, out, direction)
        else:
            self._single_gpu_fft(a, out, direction)

    def _single_gpu_fft(self, a, out, direction):
        cdef intptr_t plan = self.handle
        cdef intptr_t s = stream.get_current_stream().ptr
        cdef int result

        with nogil:
            result = cufftSetStream(<Handle>plan, <Stream>s)
        check_result(result)

        if self.fft_type == CUFFT_C2C:
            execC2C(plan, a.data.ptr, out.data.ptr, direction)
        elif self.fft_type == CUFFT_R2C:
            execR2C(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_C2R:
            execC2R(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_Z2Z:
            execZ2Z(plan, a.data.ptr, out.data.ptr, direction)
        elif self.fft_type == CUFFT_D2Z:
            execD2Z(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_Z2D:
            execZ2D(plan, a.data.ptr, out.data.ptr)
        else:
            raise ValueError

    def _multi_gpu_setup_buffer(self, a):
        cdef XtArrayDesc* xtArr_desc
        cdef XtArray* xtArr
        cdef intptr_t ptr
        cdef list xtArr_buffer, share, sizes
        cdef int i, nGPUs
        cdef XtSubFormat fmt

        # First, get the buffers:
        # We need to manage the buffers ourselves in order to avoid excessive,
        # unnecessary memory usage. Note that these buffers are used for
        # in-place transforms, and are re-used (lifetime tied to the plan).

        if isinstance(a, cupy.ndarray) or isinstance(a, numpy.ndarray):
            if self.xtArr == 0 and self.xtArr_buffer is None:
                nGPUs = len(self.gpus)

                # this is the rule for distributing the workload
                if self.batch > 1:
                    share = [self.batch // nGPUs] * nGPUs
                    for i in range(self.batch % nGPUs):
                        share[i] += 1
                else:
                    share = [1.0 / nGPUs] * nGPUs
                sizes = [int(share[i] * self.nx * a.dtype.itemsize)
                         for i in range(nGPUs)]

                # get buffer
                if isinstance(a, cupy.ndarray):
                    fmt = CUFFT_XT_FORMAT_INPLACE
                else:  # from numpy
                    fmt = CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED
                ptr, xtArr_buffer = _XtMalloc(self.gpus, sizes, fmt)

                xtArr = <XtArray*>ptr
                xtArr_desc = xtArr.descriptor
                assert xtArr_desc.nGPUs == nGPUs

                self.batch_share = share
                self.xtArr = ptr
                self.xtArr_buffer = xtArr_buffer  # kept to ensure lifetime
            else:
                # After FFT the subFormat flag is silently changed to
                # CUFFT_XT_FORMAT_INPLACE_SHUFFLED. For reuse we must correct
                # it, otherwise in the next run we would encounter
                # CUFFT_INVALID_TYPE!
                ptr = self.xtArr
                xtArr = <XtArray*>ptr
                if self.batch == 1:
                    if isinstance(a, cupy.ndarray):
                        fmt = CUFFT_XT_FORMAT_INPLACE
                    else:  # from numpy
                        fmt = CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED
                    xtArr.subFormat = fmt
        elif isinstance(a, list):
            # TODO(leofang): For users running Plan1d.fft() (bypassing all
            # checks in cupy.fft.fft), they are allowed to send in a list of
            # ndarrays, each of which is on a different GPU. Then, no data
            # copy is needed, just replace the pointers in the descriptor.
            raise NotImplementedError('User-managed buffer area is not yet '
                                      'supported.')
        else:
            raise ValueError('Impossible to reach.')

    def _multi_gpu_memcpy(self, a, str action):
        cdef Handle plan = <Handle>self.handle
        cdef list xtArr_buffer, share
        cdef int nGPUs, dev, s_device, start, count, result
        cdef XtArray* arr
        cdef intptr_t ptr, ptr2
        cdef size_t size

        assert isinstance(a, (cupy.ndarray, numpy.ndarray))

        start = 0
        assert a.flags.c_contiguous  # NumPy does not have _c_contiguous
        b = a.ravel()
        assert b.flags['OWNDATA'] is False
        assert self.xtArr_buffer is not None
        ptr = self.xtArr
        arr = <XtArray*>ptr
        xtArr_buffer = self.xtArr_buffer
        nGPUs = len(self.gpus)
        share = self.batch_share

        if action == 'scatter':
            if isinstance(a, cupy.ndarray):
                s_device = b.data.device_id
                if s_device not in self.scatter_streams:
                    self._multi_gpu_get_scatter_streams_events(s_device)

                # When we come here, another stream could still be
                # copying data for us, so we wait patiently...
                outer_stream = stream.get_current_stream()
                outer_stream.synchronize()

                for dev in range(nGPUs):
                    count = int(share[dev] * self.nx)
                    size = count * b.dtype.itemsize
                    curr_stream = self.scatter_streams[s_device][dev]
                    curr_event = self.scatter_events[s_device][dev]
                    xtArr_buffer[dev].copy_from_device_async(
                        b[start:start+count].data, size, curr_stream)
                    if dev != 0:
                        prev_event = self.scatter_events[s_device][dev-1]
                        curr_stream.wait_event(prev_event)
                    curr_event.record(curr_stream)
                    start += count
                assert start == b.size
                self.scatter_events[s_device][-1].synchronize()
            else:  # numpy
                ptr2 = b.ctypes.data
                with nogil:
                    result = cufftXtMemcpy(
                        plan, <void*>arr, <void*>ptr2,
                        CUFFT_COPY_HOST_TO_DEVICE)
                check_result(result)
        elif action == 'gather':
            if isinstance(a, cupy.ndarray):
                if self.batch == 1:
                    _reorder_buffers(plan, self.xtArr, xtArr_buffer)

                # When we come here, another stream could still be
                # copying data for us, so we wait patiently...
                outer_stream = stream.get_current_stream()
                outer_stream.synchronize()

                for i in range(nGPUs):
                    count = int(share[i] * self.nx)
                    size = count * b.dtype.itemsize
                    curr_stream = self.gather_streams[i]
                    curr_event = self.gather_events[i]
                    b[start:start+count].data.copy_from_device_async(
                        xtArr_buffer[i], size, curr_stream)
                    if i != 0:
                        prev_event = self.gather_events[i-1]
                        curr_stream.wait_event(prev_event)
                    curr_event.record(curr_stream)
                    start += count
                assert start == b.size
                self.gather_events[-1].synchronize()
            else:  # numpy
                ptr2 = b.ctypes.data
                with nogil:
                    result = cufftXtMemcpy(
                        plan, <void*>ptr2, <void*>arr,
                        CUFFT_COPY_DEVICE_TO_HOST)
                check_result(result)
        else:
            raise ValueError

    def _multi_gpu_fft(self, a, out, direction):
        # When we arrive here, the normal CuPy call path ensures a and out
        # reside on the same GPU -> must distribute a to all of the GPUs
        self._multi_gpu_setup_buffer(a)

        # Next, copy data to buffer
        self._multi_gpu_memcpy(a, 'scatter')

        # Actual workhorses
        # Note: multi-GPU plans cannot set stream
        cdef intptr_t plan = self.handle
        if self.fft_type == CUFFT_C2C:
            multi_gpu_execC2C(plan, self.xtArr, self.xtArr, direction)
        elif self.fft_type == CUFFT_Z2Z:
            multi_gpu_execZ2Z(plan, self.xtArr, self.xtArr, direction)
        else:
            raise ValueError

        # Gather the distributed outputs
        self._multi_gpu_memcpy(out, 'gather')

    def _output_dtype_and_shape(self, a):
        shape = list(a.shape)
        if self.fft_type == CUFFT_C2C:
            dtype = numpy.complex64
        elif self.fft_type == CUFFT_R2C:
            shape[-1] = shape[-1] // 2 + 1
            dtype = numpy.complex64
        elif self.fft_type == CUFFT_C2R:
            shape[-1] = self.nx
            dtype = numpy.float32
        elif self.fft_type == CUFFT_Z2Z:
            dtype = numpy.complex128
        elif self.fft_type == CUFFT_D2Z:
            shape[-1] = shape[-1] // 2 + 1
            dtype = numpy.complex128
        else:
            shape[-1] = self.nx
            dtype = numpy.float64
        return tuple(shape), dtype

    def get_output_array(self, a):
        shape, dtype = self._output_dtype_and_shape(a)
        return cupy.empty(shape, dtype)

    def check_output_array(self, a, out):
        """Verify shape and dtype of the output array.

        Parameters
        ----------
        a : cupy.array
            The input to the transform
        out : cupy.array
            The array where the output of the transform will be stored.
        """
        shape, dtype = self._output_dtype_and_shape(a)
        if out.shape != shape:
            raise ValueError(
                ('out must have shape {}.').format(shape))
        if out.dtype != dtype:
            raise ValueError(
                'out dtype mismatch: found {}, expected {}'.format(
                    out.dtype, dtype))
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
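# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): typical use of the output
# helpers above on a single-GPU plan. `plan`, `a`, and the forward-direction
# constant are assumed to exist as elsewhere in this module; treat this as a
# hedged illustration, not canonical API documentation.
#
#     out = plan.get_output_array(a)    # allocate a matching output array
#     plan.check_output_array(a, out)   # raises ValueError on any mismatch
#     plan.fft(a, out, CUFFT_FORWARD)   # run the transform into `out`
# ---------------------------------------------------------------------------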
cdef class PlanNd:
    def __init__(self, object shape, object inembed, int istride,
                 int idist, object onembed, int ostride, int odist,
                 int fft_type, int batch, str order, int last_axis, last_size):
        cdef Handle plan
        cdef size_t work_size
        cdef int ndim, result
        cdef vector.vector[int] shape_arr = shape
        cdef vector.vector[int] inembed_arr
        cdef vector.vector[int] onembed_arr
        cdef int* shape_ptr = shape_arr.data()
        cdef int* inembed_ptr
        cdef int* onembed_ptr
        cdef intptr_t ptr

        self.handle = <intptr_t>0
        ndim = len(shape)

        if inembed is None:
            inembed_ptr = NULL  # ignore istride and use default strides
        else:
            inembed_arr = inembed
            inembed_ptr = inembed_arr.data()

        if onembed is None:
            onembed_ptr = NULL  # ignore ostride and use default strides
        else:
            onembed_arr = onembed
            onembed_ptr = onembed_arr.data()

        with nogil:
            result = cufftCreate(&plan)
            if result == 0:
                result = cufftSetAutoAllocation(plan, 0)
        check_result(result)

        self.handle = <intptr_t>plan
        self.gpus = None  # TODO(leofang): support multi-GPU PlanNd

        if batch == 0:
            work_size = 0
        else:
            with nogil:
                result = cufftMakePlanMany(plan, ndim, shape_ptr,
                                           inembed_ptr, istride, idist,
                                           onembed_ptr, ostride, odist,
                                           <Type>fft_type, batch,
                                           &work_size)

            # cufftMakePlanMany could use a large amount of memory
            if result == 2:
                cupy.get_default_memory_pool().free_all_blocks()
                with nogil:
                    result = cufftMakePlanMany(plan, ndim, shape_ptr,
                                               inembed_ptr, istride, idist,
                                               onembed_ptr, ostride, odist,
                                               <Type>fft_type, batch,
                                               &work_size)
            check_result(result)

        # TODO: for CUDA>=9.2 could also allow setting a work area policy
        # result = cufftXtSetWorkAreaPolicy(plan, policy, &work_size)

        work_area = memory.alloc(work_size)
        ptr = <intptr_t>(work_area.ptr)
        with nogil:
            result = cufftSetWorkArea(plan, <void*>(ptr))
        check_result(result)

        self.shape = tuple(shape)
        self.fft_type = <Type>fft_type
        self.work_area = work_area
        self.order = order  # either 'C' or 'F'
        self.last_axis = last_axis  # ignored for C2C
        self.last_size = last_size  # = None (and ignored) for C2C

    def __dealloc__(self):
        cdef Handle plan = <Handle>self.handle
        cdef int result

        if plan != <Handle>0:
            with nogil:
                result = cufftDestroy(plan)
            check_result(result)
            self.handle = <intptr_t>0

    def __enter__(self):
        _thread_local._current_plan = self
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _thread_local._current_plan = None

    def fft(self, a, out, direction):
        cdef intptr_t plan = self.handle
        cdef intptr_t s = stream.get_current_stream().ptr
        cdef int result

        with nogil:
            result = cufftSetStream(<Handle>plan, <Stream>s)
        check_result(result)

        if self.fft_type == CUFFT_C2C:
            execC2C(plan, a.data.ptr, out.data.ptr, direction)
        elif self.fft_type == CUFFT_R2C:
            execR2C(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_C2R:
            execC2R(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_Z2Z:
            execZ2Z(plan, a.data.ptr, out.data.ptr, direction)
        elif self.fft_type == CUFFT_D2Z:
            execD2Z(plan, a.data.ptr, out.data.ptr)
        elif self.fft_type == CUFFT_Z2D:
            execZ2D(plan, a.data.ptr, out.data.ptr)
        else:
            raise ValueError

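    # Editor's note (added commentary, not in the original file): fft() above
    # re-binds the plan to the *current* CuPy stream via cufftSetStream on
    # every call, so a single PlanNd can be reused across streams as long as
    # one handle is not driven concurrently from multiple threads.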
    def _output_dtype_and_shape(self, a):
        shape = list(a.shape)
        if self.fft_type == CUFFT_C2C:
            dtype = numpy.complex64
        elif self.fft_type == CUFFT_R2C:
            shape[self.last_axis] = self.last_size
            dtype = numpy.complex64
        elif self.fft_type == CUFFT_C2R:
            shape[self.last_axis] = self.last_size
            dtype = numpy.float32
        elif self.fft_type == CUFFT_Z2Z:
            dtype = numpy.complex128
        elif self.fft_type == CUFFT_D2Z:
            shape[self.last_axis] = self.last_size
            dtype = numpy.complex128
        else:  # CUFFT_Z2D
            shape[self.last_axis] = self.last_size
            dtype = numpy.float64
        return tuple(shape), dtype

    def get_output_array(self, a, order='C'):
        shape, dtype = self._output_dtype_and_shape(a)
        return cupy.empty(shape, dtype, order=order)

    def check_output_array(self, a, out):
        if out is a:
            # TODO(leofang): think about in-place transforms for C2R & R2C
            return
        if self.fft_type in (CUFFT_C2C, CUFFT_Z2Z):
            if out.shape != a.shape:
                raise ValueError('output shape mismatch')
            if out.dtype != a.dtype:
                raise ValueError('output dtype mismatch')
        else:
            if out.ndim != a.ndim:
                raise ValueError('output dimension mismatch')
            for i, size in enumerate(out.shape):
                if (i != self.last_axis and size != a.shape[i]) or \
                        (i == self.last_axis and size != self.last_size):
                    raise ValueError('output shape is incorrect')
            if self.fft_type in (CUFFT_R2C, CUFFT_D2Z):
                if out.dtype != cupy.dtype(a.dtype.char.upper()):
                    raise ValueError('output dtype is unexpected')
            else:  # CUFFT_C2R or CUFFT_Z2D
                if out.dtype != cupy.dtype(a.dtype.char.lower()):
                    raise ValueError('output dtype is unexpected')
        if not ((out.flags.f_contiguous == a.flags.f_contiguous) and
                (out.flags.c_contiguous == a.flags.c_contiguous)):
            raise ValueError('output contiguity mismatch')


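# ---------------------------------------------------------------------------
# Editor's note (hedged sketch, not part of the original file): the
# `result == 2` branch in PlanNd.__init__ retries plan creation after
# CUFFT_ALLOC_FAILED (cufftResult error code 2), first returning CuPy's
# cached device memory to the driver. The same drain-and-retry pattern works
# around any allocating call; `make_plan` below is a hypothetical stand-in:
#
#     try:
#         plan = make_plan(...)
#     except MemoryError:
#         cupy.get_default_memory_pool().free_all_blocks()
#         plan = make_plan(...)  # retry once with an emptied pool
# ---------------------------------------------------------------------------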
# TODO(leofang): Unify with PlanNd?!
# TODO(leofang): support cufftXtSetGPUs?
cdef class XtPlanNd:
    def __init__(self, shape,
                 inembed, long long int istride, long long int idist, idtype,
                 onembed, long long int ostride, long long int odist, odtype,
                 long long int batch, edtype, *,
                 str order, int last_axis, last_size):
        # Note: we don't pass in fft_type here because it's redundant and
        # does not cover exotic types like complex32 or bf16

        cdef Handle plan
        cdef size_t work_size
        cdef int ndim, result
        cdef vector.vector[long long int] shape_arr = shape
        cdef vector.vector[long long int] inembed_arr
        cdef vector.vector[long long int] onembed_arr
        cdef long long int* shape_ptr = shape_arr.data()
        cdef long long int* inembed_ptr
        cdef long long int* onembed_ptr

        self.handle = <intptr_t>0
        ndim = len(shape)

        if inembed is None:
            inembed_ptr = NULL  # ignore istride and use default strides
        else:
            inembed_arr = inembed
            inembed_ptr = inembed_arr.data()

        if onembed is None:
            onembed_ptr = NULL  # ignore ostride and use default strides
        else:
            onembed_arr = onembed
            onembed_ptr = onembed_arr.data()

        with nogil:
            result = cufftCreate(&plan)
            if result == 0:
                result = cufftSetAutoAllocation(plan, 0)
        check_result(result)

        self.handle = <intptr_t>plan
        self.gpus = None  # TODO(leofang): support multi-GPU plans

        # determine input/output/execution types here; note that we don't
        # cimport to_cuda_dtype due to circular dependency
        from cupy._core._dtype import to_cuda_dtype
        cdef int itype = to_cuda_dtype(idtype, True)
        cdef int otype = to_cuda_dtype(odtype, True)
        cdef int etype = to_cuda_dtype(edtype, True)

        cdef long long int length
        cdef long long int full = 1
        for length in shape:
            full *= length
        length = last_size if last_size is not None else shape[-1]
        try:
            self._sanity_checks(itype, otype, etype, length, full)
        except AssertionError:
            raise ValueError('input/output/execution types mismatch')

        if batch == 0:
            work_size = 0
        else:
            with nogil:
                result = cufftXtMakePlanMany(
                    plan, ndim, shape_ptr,
                    inembed_ptr, istride, idist, <DataType>itype,
                    onembed_ptr, ostride, odist, <DataType>otype,
                    batch, &work_size, <DataType>etype)

            # cufftXtMakePlanMany could use a large amount of memory
            if result == 2:
                cupy.get_default_memory_pool().free_all_blocks()
                with nogil:
                    result = cufftXtMakePlanMany(
                        plan, ndim, shape_ptr,
                        inembed_ptr, istride, idist, <DataType>itype,
                        onembed_ptr, ostride, odist, <DataType>otype,
                        batch, &work_size, <DataType>etype)
            check_result(result)

        work_area = memory.alloc(work_size)
        cdef intptr_t ptr = <intptr_t>(work_area.ptr)
        with nogil:
            result = cufftSetWorkArea(plan, <void*>(ptr))
        check_result(result)

        self.shape = tuple(shape)
        self.itype = itype
        self.otype = otype
        self.etype = etype
        self.work_area = work_area
        self.order = order  # either 'C' or 'F'
        self.last_axis = last_axis  # ignored for C2C
        self.last_size = last_size  # = None (and ignored) for C2C

    def __dealloc__(self):
        cdef Handle plan = <Handle>self.handle
        cdef int result

        if plan != <Handle>0:
            with nogil:
                result = cufftDestroy(plan)
            check_result(result)
            self.handle = <intptr_t>0

    def __enter__(self):
        _thread_local._current_plan = self
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _thread_local._current_plan = None

    def fft(self, a, out, direction):
        cdef intptr_t plan = self.handle
        cdef intptr_t s = stream.get_current_stream().ptr
        cdef int result

        with nogil:
            result = cufftSetStream(<Handle>plan, <Stream>s)
        check_result(result)
        XtExec(plan, a.data.ptr, out.data.ptr, direction)

    def _sanity_checks(self, int itype, int otype, int etype,
                       long long int last_size, long long int full_size):
        # not every possible type combination is legit
        # TODO(leofang): support bf16?
        # C2C
        if itype == runtime.CUDA_C_16F and otype == runtime.CUDA_C_16F:
            assert etype == runtime.CUDA_C_16F
        elif itype == runtime.CUDA_C_32F and otype == runtime.CUDA_C_32F:
            assert etype == runtime.CUDA_C_32F
        elif itype == runtime.CUDA_C_64F and otype == runtime.CUDA_C_64F:
            assert etype == runtime.CUDA_C_64F
        # C2R
        elif itype == runtime.CUDA_C_16F and otype == runtime.CUDA_R_16F:
            assert etype == runtime.CUDA_C_16F
        elif itype == runtime.CUDA_C_32F and otype == runtime.CUDA_R_32F:
            assert etype == runtime.CUDA_C_32F
        elif itype == runtime.CUDA_C_64F and otype == runtime.CUDA_R_64F:
            assert etype == runtime.CUDA_C_64F
        # R2C
        elif itype == runtime.CUDA_R_16F and otype == runtime.CUDA_C_16F:
            assert etype == runtime.CUDA_C_16F
        elif itype == runtime.CUDA_R_32F and otype == runtime.CUDA_C_32F:
            assert etype == runtime.CUDA_C_32F
        elif itype == runtime.CUDA_R_64F and otype == runtime.CUDA_C_64F:
            assert etype == runtime.CUDA_C_64F
        else:
            assert False

        # check fp16 runtime constraints
        # https://docs.nvidia.com/cuda/cufft/index.html#half-precision-transforms
        if etype == runtime.CUDA_C_16F:
            if int(device.get_compute_capability()) < 53:
                raise RuntimeError("this device doesn't support complex32 FFT")
            if (last_size & (last_size - 1)) != 0:
                raise ValueError('size must be power of 2')
            if full_size > 4000000000:
                raise ValueError('input array too large')
            # TODO(leofang): check if multi-GPU is requested
        # TODO(leofang): also check for bf16?
        # https://docs.nvidia.com/cuda/cufft/index.html#bfloat16-precision-transforms

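    # Editor's note (hedged sketch, not part of the original file): the
    # half-precision size check in _sanity_checks relies on the classic bit
    # trick that a positive integer n is a power of two iff clearing its
    # lowest set bit leaves zero:
    #
    #     def _is_power_of_two(n):  # hypothetical helper, for illustration
    #         return n > 0 and (n & (n - 1)) == 0
    #
    #     assert _is_power_of_two(1024) and not _is_power_of_two(1000)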
    def _output_dtype_and_shape(self, a):
        shape = list(a.shape)
        if self.itype != self.otype:  # R2C or C2R
            shape[self.last_axis] = self.last_size
        if self.otype == runtime.CUDA_C_16F:
            # dtype = numpy.complex32
            raise NotImplementedError('complex32 is not supported yet, please '
                                      'allocate the output array manually')
        elif self.otype == runtime.CUDA_C_32F:
            dtype = numpy.complex64
        elif self.otype == runtime.CUDA_C_64F:
            dtype = numpy.complex128
        elif self.otype == runtime.CUDA_R_16F:
            dtype = numpy.float16
        elif self.otype == runtime.CUDA_R_32F:
            dtype = numpy.float32
        elif self.otype == runtime.CUDA_R_64F:
            dtype = numpy.float64
        return tuple(shape), dtype

    def get_output_array(self, a, order='C'):
        shape, dtype = self._output_dtype_and_shape(a)
        return cupy.empty(shape, dtype, order=order)


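# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): XtPlanNd is the entry
# point for non-standard precisions. A hedged example of a 1D single-
# precision C2C plan following the constructor signature above; the layout
# arguments (None embeds, unit strides, distance n) are illustrative values,
# not canonical ones:
#
#     n = 256  # complex32 would additionally require a power-of-two size
#     plan = XtPlanNd((n,),
#                     None, 1, n, numpy.complex64,   # input layout & dtype
#                     None, 1, n, numpy.complex64,   # output layout & dtype
#                     1, numpy.complex64,            # batch, execution dtype
#                     order='C', last_axis=-1, last_size=None)
# ---------------------------------------------------------------------------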
cpdef execC2C(intptr_t plan, intptr_t idata, intptr_t odata, int direction):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecC2C(h, <Complex*>idata, <Complex*>odata,
                              direction)
    check_result(result)


cpdef execR2C(intptr_t plan, intptr_t idata, intptr_t odata):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecR2C(h, <Float*>idata, <Complex*>odata)
    check_result(result)


cpdef execC2R(intptr_t plan, intptr_t idata, intptr_t odata):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecC2R(h, <Complex*>idata, <Float*>odata)
    check_result(result)


cpdef execZ2Z(intptr_t plan, intptr_t idata, intptr_t odata, int direction):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecZ2Z(h, <DoubleComplex*>idata,
                              <DoubleComplex*>odata, direction)
    check_result(result)


cpdef execD2Z(intptr_t plan, intptr_t idata, intptr_t odata):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecD2Z(h, <Double*>idata, <DoubleComplex*>odata)
    check_result(result)


cpdef execZ2D(intptr_t plan, intptr_t idata, intptr_t odata):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftExecZ2D(h, <DoubleComplex*>idata, <Double*>odata)
    check_result(result)


cpdef multi_gpu_execC2C(intptr_t plan, intptr_t idata, intptr_t odata,
                        int direction):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftXtExecDescriptorC2C(h, <XtArray*>idata,
                                          <XtArray*>odata, direction)
    check_result(result)


cpdef multi_gpu_execZ2Z(intptr_t plan, intptr_t idata, intptr_t odata,
                        int direction):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftXtExecDescriptorZ2Z(h, <XtArray*>idata,
                                          <XtArray*>odata, direction)
    check_result(result)


cpdef XtExec(intptr_t plan, intptr_t idata, intptr_t odata, int direction):
    cdef Handle h = <Handle>plan
    cdef int result

    with nogil:
        result = cufftXtExec(h, <void*>idata, <void*>odata, direction)
    check_result(result)


cpdef intptr_t setCallback(
        intptr_t plan, int cb_type, bint is_load, intptr_t aux_arr=0):
    cdef Handle h = <Handle>plan  # no-cython-lint
    cdef int result  # no-cython-lint
    cdef void** callerInfo  # no-cython-lint

    IF CUPY_CUFFT_STATIC:
        with nogil:
            if aux_arr > 0:
                callerInfo = (<void**>(&aux_arr))
            else:
                callerInfo = NULL
            result = set_callback(
                h, <callbackType>cb_type, is_load, callerInfo)
        check_result(result)
    ELSE:
        raise RuntimeError('cuFFT is dynamically linked and thus does not '
                           'support callback')
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cub.cu
ADDED
@@ -0,0 +1,1189 @@
#include "cupy_cub.h"  // need to make atomicAdd visible to CUB templates early
#include <cupy/type_dispatcher.cuh>

#ifndef CUPY_USE_HIP
#include <cfloat>  // For FLT_MAX definitions
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/device/device_spmv.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/device/device_histogram.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cuda/functional>
#include <cuda/std/functional>
#else
#include <hipcub/device/device_reduce.hpp>
#include <hipcub/device/device_segmented_reduce.hpp>
#include <hipcub/device/device_scan.hpp>
#include <hipcub/device/device_histogram.hpp>
#include <rocprim/iterator/counting_iterator.hpp>
#include <hipcub/iterator/transform_input_iterator.hpp>
#endif


/* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */
#ifndef CUPY_USE_HIP
// - This works only because all data fields in the *Traits struct are not
//   used in <cub/device/device_reduce.cuh>.
// - The Max() and Lowest() below are chosen to comply with NumPy's lexical
//   ordering; note that std::numeric_limits<T> does not support complex
//   numbers as in general the comparison is ill defined.
// - DO NOT USE THIS STUB for supporting CUB sorting!!!!!!
using namespace cub;
#define CUPY_CUB_NAMESPACE cub

template <>
struct FpLimits<complex<float>>
{
    static __host__ __device__ __forceinline__ complex<float> Max() {
        return (complex<float>(FLT_MAX, FLT_MAX));
    }

    static __host__ __device__ __forceinline__ complex<float> Lowest() {
        return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
    }
};

template <>
struct FpLimits<complex<double>>
{
    static __host__ __device__ __forceinline__ complex<double> Max() {
        return (complex<double>(DBL_MAX, DBL_MAX));
    }

    static __host__ __device__ __forceinline__ complex<double> Lowest() {
        return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
    }
};

template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};

// need specializations for initial values
namespace std {

template <>
class numeric_limits<thrust::complex<float>> {
  public:
    static __host__ __device__ thrust::complex<float> infinity() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<thrust::complex<double>> {
  public:
    static __host__ __device__ thrust::complex<double> infinity() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<__half> {
  public:
    static __host__ __device__ constexpr __half infinity() noexcept {
        unsigned short inf_half = 0x7C00U;
#if (defined(_MSC_VER) && _MSC_VER >= 1920)
#if CUDA_VERSION < 11030
        // WAR: CUDA 11.2.x + VS 2019 fails with __builtin_bit_cast
        union caster {
            unsigned short u_;
            __half h_;
        };
        return caster{inf_half}.h_;
#else  // CUDA_VERSION < 11030
        // WAR:
        // - we want a constexpr here, but reinterpret_cast cannot be used
        // - we want to use std::bit_cast, but it requires C++20 which is too new
        // - we use the compiler builtin; fortunately both gcc and msvc have it
        return __builtin_bit_cast(__half, inf_half);
#endif
#else
        return *reinterpret_cast<__half*>(&inf_half);
#endif
    }

    static constexpr bool has_infinity = true;
};

}  // namespace std


#else

// hipCUB internally uses std::numeric_limits, so we should provide specializations for the complex numbers.
// Note that there's std::complex, so to avoid name collision we must use the full decoration (thrust::complex)!
// TODO(leofang): wrap CuPy's thrust namespace with another one (say, cupy::thrust) for safer scope resolution?
#define CUPY_CUB_NAMESPACE hipcub

namespace std {
template <>
class numeric_limits<thrust::complex<float>> {
  public:
    static __host__ __device__ thrust::complex<float> max() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::max(), std::numeric_limits<float>::max());
    }

    static __host__ __device__ thrust::complex<float> lowest() noexcept {
        return thrust::complex<float>(-std::numeric_limits<float>::max(), -std::numeric_limits<float>::max());
    }

    static __host__ __device__ thrust::complex<float> infinity() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<thrust::complex<double>> {
  public:
    static __host__ __device__ thrust::complex<double> max() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::max(), std::numeric_limits<double>::max());
    }

    static __host__ __device__ thrust::complex<double> lowest() noexcept {
        return thrust::complex<double>(-std::numeric_limits<double>::max(), -std::numeric_limits<double>::max());
    }

    static __host__ __device__ thrust::complex<double> infinity() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity());
    }

    static constexpr bool has_infinity = true;
};

// Copied from https://github.com/ROCmSoftwarePlatform/hipCUB/blob/master-rocm-3.5/hipcub/include/hipcub/backend/rocprim/device/device_reduce.hpp
// (For some reason the specialization for __half defined in the above file does not work, so we have to go
// through the same route as we did above for complex numbers.)
template <>
class numeric_limits<__half> {
  public:
    static __host__ __device__ __half max() noexcept {
        unsigned short max_half = 0x7bff;
        __half max_value = *reinterpret_cast<__half*>(&max_half);
        return max_value;
    }

    static __host__ __device__ __half lowest() noexcept {
        unsigned short lowest_half = 0xfbff;
        __half lowest_value = *reinterpret_cast<__half*>(&lowest_half);
        return lowest_value;
    }

    static __host__ __device__ __half infinity() noexcept {
        unsigned short inf_half = 0x7C00U;
        __half inf_value = *reinterpret_cast<__half*>(&inf_half);
        return inf_value;
    }

    static constexpr bool has_infinity = true;
};
}  // namespace std

using namespace hipcub;

#endif  // ifndef CUPY_USE_HIP

__host__ __device__ __half half_negate_inf() {
    unsigned short minf_half = 0xFC00U;
    __half* minf_value = reinterpret_cast<__half*>(&minf_half);
    return *minf_value;
}

/* ------------------------------------ end of boilerplate ------------------------------------ */


/* ------------------------------------ "Patches" to CUB ------------------------------------
   This stub is needed because CUB does not have a built-in "prod" operator
*/

//
// product functor
//
#ifdef CUPY_USE_HIP
struct _multiply
{
    template <typename T>
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
    {
        return a * b;
    }
};
#else
using _multiply = cuda::std::multiplies<>;
#endif

//
// arange functor: arange(0, n+1) -> step_size * arange(0, n+1)
//
struct _arange
{
  private:
    int step_size;

  public:
    __host__ __device__ __forceinline__ _arange(int i): step_size(i) {}
    __host__ __device__ __forceinline__ int operator()(const int &in) const {
        return step_size * in;
    }
};

#ifndef CUPY_USE_HIP
typedef TransformInputIterator<int, _arange, CountingInputIterator<int>> seg_offset_itr;
#else
typedef TransformInputIterator<int, _arange, rocprim::counting_iterator<int>> seg_offset_itr;
#endif

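// Editor's note (added commentary, not in the original file): seg_offset_itr
// composes a counting iterator with the _arange functor, so reading position
// i yields i * step_size without ever materializing the offsets. With
// step_size = n it produces 0, n, 2n, ..., which is exactly the pair of
// begin/end offset sequences (offset_start, offset_start + 1) that the
// segmented reductions below hand to DeviceSegmentedReduce.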
|
| 243 |
+
/*
|
| 244 |
+
These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain
|
| 245 |
+
behaviors with which we must comply.
|
| 246 |
+
|
| 247 |
+
CUDA/HIP have different signatures for Max/Min because of the recent changes in CCCL (for the former).
|
| 248 |
+
*/
|
| 249 |
+
|
| 250 |
+
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
|
| 251 |
+
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
|
| 252 |
+
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
|
| 253 |
+
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
|
| 254 |
+
return __hisnan(x);
|
| 255 |
+
#else
|
| 256 |
+
// TODO: avoid cast to float
|
| 257 |
+
return isnan(__half2float(x));
|
| 258 |
+
#endif
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
|
| 262 |
+
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
|
| 263 |
+
return l < r;
|
| 264 |
+
#else
|
| 265 |
+
// TODO: avoid cast to float
|
| 266 |
+
return __half2float(l) < __half2float(r);
|
| 267 |
+
#endif
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
|
| 271 |
+
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
|
| 272 |
+
return l == r;
|
| 273 |
+
#else
|
| 274 |
+
// TODO: avoid cast to float
|
| 275 |
+
return __half2float(l) == __half2float(r);
|
| 276 |
+
#endif
|
| 277 |
+
}
|
| 278 |
+
#endif
|
| 279 |
+
|
| 280 |
+
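// Editor's note (added commentary, not in the original file): the Max/Min
// specializations below implement NumPy's reduction semantics, where NaN is
// contagious: numpy.max([1.0, nan, 3.0]) is nan, whereas stock CUB would let
// the plain comparison discard the NaN. Testing isnan(a) before isnan(b)
// also reproduces NumPy's choice of the first NaN encountered.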
#ifdef CUPY_USE_HIP

//
// Max()
//

template <typename T>
struct select_max {
    using type = Max;
};

// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
    // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
    // - NumPy behavior: If both a and b contain NaN, the first argument is chosen
    // - isnan() and max() are defined in cupy/complex.cuh
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
    // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
    // - NumPy behavior: If both a and b contain NaN, the first argument is chosen
    // - isnan() and max() are defined in cupy/complex.cuh
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (half_isnan(a)) {return a;}
    else if (half_isnan(b)) {return b;}
    else { return half_less(a, b) ? b : a; }
}
#endif

//
// Min()
//

template <typename T>
struct select_min {
    using type = Min;
};

// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
    // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
    // - NumPy behavior: If both a and b contain NaN, the first argument is chosen
    // - isnan() and min() are defined in cupy/complex.cuh
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
    // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
    // - NumPy behavior: If both a and b contain NaN, the first argument is chosen
    // - isnan() and min() are defined in cupy/complex.cuh
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
    // NumPy behavior: NaN is always chosen!
    if (half_isnan(a)) {return a;}
    else if (half_isnan(b)) {return b;}
    else { return half_less(a, b) ? a : b; }
}
#endif

#endif  // ifdef CUPY_USE_HIP

//
// ArgMax()
//

// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
    const KeyValuePair<int, float> &a,
    const KeyValuePair<int, float> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
    const KeyValuePair<int, double> &a,
    const KeyValuePair<int, double> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
    const KeyValuePair<int, complex<float>> &a,
    const KeyValuePair<int, complex<float>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
    const KeyValuePair<int, complex<double>> &a,
    const KeyValuePair<int, complex<double>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
    const KeyValuePair<int, __half> &a,
    const KeyValuePair<int, __half> &b) const
{
    if (half_isnan(a.value))
        return a;
    else if (half_isnan(b.value))
        return b;
    else if ((half_less(a.value, b.value)) ||
             (half_equal(a.value, b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}
#endif

//
// ArgMin()
//

// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
    const KeyValuePair<int, float> &a,
    const KeyValuePair<int, float> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
    const KeyValuePair<int, double> &a,
    const KeyValuePair<int, double> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
    const KeyValuePair<int, complex<float>> &a,
    const KeyValuePair<int, complex<float>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
    const KeyValuePair<int, complex<double>> &a,
    const KeyValuePair<int, complex<double>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
    const KeyValuePair<int, __half> &a,
    const KeyValuePair<int, __half> &b) const
{
    if (half_isnan(a.value))
        return a;
    else if (half_isnan(b.value))
        return b;
    else if ((half_less(b.value, a.value)) ||
             (half_equal(a.value, b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}
#endif

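// Editor's note (added commentary, not in the original file): in the ArgMax/
// ArgMin specializations above, the extra `(b.key < a.key)` clause on equal
// values makes the reduction settle ties on the smallest index, matching
// numpy.argmax/numpy.argmin, while the NaN checks again give NaN priority.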
#ifndef CUPY_USE_HIP

//
// Max()
//

template <typename T>
struct select_max {
#if CCCL_VERSION >= 2008000
    using type = cuda::maximum<>;
#else
    using type = cub::Max;
#endif
};

template <typename T>
struct nan_handling_max {
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const {
        // NumPy behavior: NaN is always chosen!
        if (isnan(a)) {return a;}
        else if (isnan(b)) {return b;}
        else {return a < b ? b : a;}
    }
};

template <>
struct select_max<float> {
    using type = nan_handling_max<float>;
};

template <>
struct select_max<double> {
    using type = nan_handling_max<double>;
};

// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
template <>
struct select_max<complex<float>> {
    using type = nan_handling_max<complex<float>>;
};
template <>
struct select_max<complex<double>> {
    using type = nan_handling_max<complex<double>>;
};

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
struct select_max<__half> {
    struct type {
        __host__ __device__ __forceinline__ __half operator()(const __half &a, const __half &b) const {
            // NumPy behavior: NaN is always chosen!
            if (half_isnan(a)) {return a;}
            else if (half_isnan(b)) {return b;}
            else { return half_less(a, b) ? b : a; }
        }
    };
};
#endif

//
// Min()
//
template <typename T>
struct select_min {
#if CCCL_VERSION >= 2008000
    using type = cuda::minimum<>;
#else
    using type = cub::Min;
#endif
};

template <typename T>
struct nan_handling_min {
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const {
        // NumPy behavior: NaN is always chosen!
        if (isnan(a)) {return a;}
        else if (isnan(b)) {return b;}
        else {return a < b ? a : b;}
    }
};

template <>
struct select_min<float> {
    using type = nan_handling_min<float>;
};

template <>
struct select_min<double> {
    using type = nan_handling_min<double>;
};

// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
template <>
struct select_min<complex<float>> {
    using type = nan_handling_min<complex<float>>;
};
template <>
struct select_min<complex<double>> {
    using type = nan_handling_min<complex<double>>;
};

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
struct select_min<__half> {
    struct type {
        __host__ __device__ __forceinline__ __half operator()(const __half &a, const __half &b) const {
            // NumPy behavior: NaN is always chosen!
            if (half_isnan(a)) {return a;}
            else if (half_isnan(b)) {return b;}
            else { return half_less(a, b) ? a : b; }
        }
    };
};
#endif

#endif  // #ifndef CUPY_USE_HIP

/* ------------------------------------ End of "patches" ------------------------------------ */

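// Editor's note (added commentary, not in the original file): select_max and
// select_min act as compile-time dispatch tables: generic types fall through
// to the stock CUB/CCCL operators, while float, double, complex, and __half
// are routed to the NaN-aware functors above. The reduction wrappers below
// then pick an operator via `typename select_min<T>::type{}` with no runtime
// branching.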
//
|
| 717 |
+
// **** CUB Sum ****
|
| 718 |
+
//
|
| 719 |
+
struct _cub_reduce_sum {
|
| 720 |
+
template <typename T>
|
| 721 |
+
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
|
| 722 |
+
int num_items, cudaStream_t s)
|
| 723 |
+
{
|
| 724 |
+
DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
|
| 725 |
+
static_cast<T*>(y), num_items, s);
|
| 726 |
+
}
|
| 727 |
+
};
|
| 728 |
+
|
| 729 |
+
struct _cub_segmented_reduce_sum {
|
| 730 |
+
template <typename T>
|
| 731 |
+
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
|
| 732 |
+
int num_segments, seg_offset_itr offset_start, cudaStream_t s)
|
| 733 |
+
{
|
| 734 |
+
DeviceSegmentedReduce::Sum(workspace, workspace_size,
|
| 735 |
+
static_cast<T*>(x), static_cast<T*>(y), num_segments,
|
| 736 |
+
offset_start, offset_start+1, s);
|
| 737 |
+
}
|
| 738 |
+
};
|
| 739 |
+
|
| 740 |
+
//
|
| 741 |
+
// **** CUB Prod ****
|
| 742 |
+
//
|
| 743 |
+
struct _cub_reduce_prod {
|
| 744 |
+
template <typename T>
|
| 745 |
+
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
|
| 746 |
+
int num_items, cudaStream_t s)
|
| 747 |
+
{
|
| 748 |
+
_multiply product_op;
|
| 749 |
+
// the init value is cast from 1.0f because on host __half can only be
|
| 750 |
+
// initialized by float or double; static_cast<__half>(1) = 0 on host.
|
| 751 |
+
DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
|
| 752 |
+
static_cast<T*>(y), num_items, product_op, static_cast<T>(1.0f), s);
|
| 753 |
+
}
|
| 754 |
+
};
|
| 755 |
+
|
| 756 |
+
struct _cub_segmented_reduce_prod {
|
| 757 |
+
template <typename T>
|
| 758 |
+
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
|
| 759 |
+
int num_segments, seg_offset_itr offset_start, cudaStream_t s)
|
| 760 |
+
{
|
| 761 |
+
_multiply product_op;
|
| 762 |
+
// the init value is cast from 1.0f because on host __half can only be
|
| 763 |
+
// initialized by float or double; static_cast<__half>(1) = 0 on host.
|
| 764 |
+
DeviceSegmentedReduce::Reduce(workspace, workspace_size,
|
| 765 |
+
static_cast<T*>(x), static_cast<T*>(y), num_segments,
|
| 766 |
+
offset_start, offset_start+1,
|
| 767 |
+
product_op, static_cast<T>(1.0f), s);
|
| 768 |
+
}
|
| 769 |
+
};
|
| 770 |
+
|
| 771 |
+
//
|
| 772 |
+
// **** CUB Min ****
|
| 773 |
+
//
|
| 774 |
+
struct _cub_reduce_min {
|
| 775 |
+
template <typename T>
|
| 776 |
+
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
|
| 777 |
+
int num_items, cudaStream_t s)
|
| 778 |
+
{
|
| 779 |
+
if constexpr (std::numeric_limits<T>::has_infinity)
|
| 780 |
+
{
|
| 781 |
+
DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
|
| 782 |
+
static_cast<T*>(y), num_items,
|
| 783 |
+
typename select_min<T>::type{}, std::numeric_limits<T>::infinity(), s);
|
| 784 |
+
}
|
| 785 |
+
else
|
| 786 |
+
{
|
| 787 |
+
DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
|
| 788 |
+
static_cast<T*>(y), num_items, s);
|
| 789 |
+
}
|
| 790 |
+
}
|
| 791 |
+
};
|
| 792 |
+
|
| 793 |
+
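// Note (added commentary): the has_infinity branch above (and in the min/max
// functors below) exists because the NaN-aware comparison functors can only be
// applied through the generic DeviceReduce::Reduce, which needs an explicit
// initial value; +/-infinity is the identity element of min/max for such
// types. Integer types keep the plain DeviceReduce::Min/Max fast path.
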
struct _cub_segmented_reduce_min {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1,
                typename select_min<T>::type{}, std::numeric_limits<T>::infinity(), s);
        }
        else
        {
            DeviceSegmentedReduce::Min(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1, s);
        }
    }
};

//
// **** CUB Max ****
//
struct _cub_reduce_max {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            // to avoid compiler error: invalid argument type '__half' to unary expression on HIP...
            if constexpr (std::is_same_v<T, __half>)
            {
                DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
                    static_cast<T*>(y), num_items,
                    typename select_max<T>::type{}, half_negate_inf(), s);
            }
            else
            {
                DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
                    static_cast<T*>(y), num_items,
                    typename select_max<T>::type{}, -std::numeric_limits<T>::infinity(), s);
            }
        }
        else
        {
            DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
                static_cast<T*>(y), num_items, s);
        }
    }
};

struct _cub_segmented_reduce_max {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            // to avoid compiler error: invalid argument type '__half' to unary expression on HIP...
            if constexpr (std::is_same_v<T, __half>)
            {
                DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                    static_cast<T*>(x), static_cast<T*>(y), num_segments,
                    offset_start, offset_start+1,
                    typename select_max<T>::type{}, half_negate_inf(), s);
            }
            else
            {
                DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                    static_cast<T*>(x), static_cast<T*>(y), num_segments,
                    offset_start, offset_start+1,
                    typename select_max<T>::type{}, -std::numeric_limits<T>::infinity(), s);
            }
        }
        else
        {
            DeviceSegmentedReduce::Max(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1, s);
        }
    }
};

//
// **** CUB ArgMin ****
//
struct _cub_reduce_argmin {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
            static_cast<KeyValuePair<int, T>*>(y), num_items, s);
    }
};

// TODO(leofang): add _cub_segmented_reduce_argmin

//
// **** CUB ArgMax ****
//
struct _cub_reduce_argmax {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
            static_cast<KeyValuePair<int, T>*>(y), num_items, s);
    }
};

// TODO(leofang): add _cub_segmented_reduce_argmax

//
// **** CUB SpMV ****
//
struct _cub_device_spmv {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* values,
        void* row_offsets, void* column_indices, void* x, void* y,
        int num_rows, int num_cols, int num_nonzeros, cudaStream_t stream)
    {
#ifndef CUPY_USE_HIP
        DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
            static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
            static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
            num_nonzeros, stream);
#endif
    }
};

//
// **** CUB InclusiveSum ****
//
struct _cub_inclusive_sum {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int num_items, cudaStream_t s)
    {
        DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
            static_cast<T*>(output), num_items, s);
    }
};

//
// **** CUB inclusive product ****
//
struct _cub_inclusive_product {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int num_items, cudaStream_t s)
    {
        _multiply product_op;
        DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
            static_cast<T*>(output), product_op, num_items, s);
    }
};

//
// **** CUB histogram range ****
//
struct _cub_histogram_range {
    template <typename sampleT,
              typename binT = typename std::conditional<std::is_integral<sampleT>::value, double, sampleT>::type>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int n_bins, void* bins, size_t n_samples, cudaStream_t s) const
    {
        // Ugly hack to avoid specializing complex types, which cub::DeviceHistogram does not support.
        // TODO(leofang): revisit this part when complex support is added to cupy.histogram()
        typedef typename std::conditional<(std::is_same<sampleT, complex<float>>::value || std::is_same<sampleT, complex<double>>::value),
                                          double,
                                          sampleT>::type h_sampleT;
        typedef typename std::conditional<(std::is_same<binT, complex<float>>::value || std::is_same<binT, complex<double>>::value),
                                          double,
                                          binT>::type h_binT;

        // TODO(leofang): CUB has a bug that when specializing n_samples with type size_t,
        // it would error out. Before the fix (thrust/cub#38) is merged we disable the code
        // path splitting for now. A type/range check must be done in the caller.
        // TODO(leofang): check if hipCUB has the same bug or not

        // if (n_samples < (1ULL << 31)) {
        int num_samples = n_samples;
        DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
#ifndef CUPY_USE_HIP
            static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
#else
            // rocPRIM looks up atomic_add() from the namespace rocprim::detail; there's no way we can
            // inject a "long long" version as we did for CUDA, so we must do it in "unsigned long long"
            // and convert later...
            static_cast<unsigned long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
#endif
        // } else {
        //     DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
        //         static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), n_samples, s);
        // }
    }
};
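// Note (added commentary): the complex -> double mapping above is a
// compile-time placeholder so this functor still instantiates for complex
// dtypes; cub_device_histogram_range() below rejects complex input before
// dispatching, so that path is never taken at runtime.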

//
// **** CUB histogram even ****
//
struct _cub_histogram_even {
    template <typename sampleT>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int& n_bins, int& lower, int& upper, size_t n_samples, cudaStream_t s) const
    {
#ifndef CUPY_USE_HIP
        // Ugly hack to avoid specializing numerical types
        typedef typename std::conditional<std::is_integral<sampleT>::value, sampleT, int>::type h_sampleT;
        int num_samples = n_samples;
        static_assert(sizeof(long long) == sizeof(intptr_t), "not supported");
        DeviceHistogram::HistogramEven(workspace, workspace_size, static_cast<h_sampleT*>(input),
            static_cast<long long*>(output), n_bins, lower, upper, num_samples, s);
#else
        throw std::runtime_error("HIP is not supported yet");
#endif
    }
};

//
// APIs exposed to CuPy
//

/* -------- device reduce -------- */

void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
    int num_items, cudaStream_t stream, int op, int dtype_id)
{
    switch(op) {
    case CUPY_CUB_SUM:    return dtype_dispatcher(dtype_id, _cub_reduce_sum(),
                              workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_MIN:    return dtype_dispatcher(dtype_id, _cub_reduce_min(),
                              workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_MAX:    return dtype_dispatcher(dtype_id, _cub_reduce_max(),
                              workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
                              workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
                              workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_PROD:   return dtype_dispatcher(dtype_id, _cub_reduce_prod(),
                              workspace, workspace_size, x, y, num_items, stream);
    default:              throw std::runtime_error("Unsupported operation");
    }
}

size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t workspace_size = 0;
    cub_device_reduce(NULL, workspace_size, x, y, num_items, stream,
                      op, dtype_id);
    return workspace_size;
}

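// Note (added commentary): this helper relies on the standard CUB two-phase
// convention -- calling any Device* algorithm with a NULL workspace performs
// no work and only writes the required temporary-storage size into
// workspace_size. Callers query the size here, allocate that many bytes on
// the device, and then invoke cub_device_reduce() again with the real pointer.
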
/* -------- device segmented reduce -------- */

void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
    void* x, void* y, int num_segments, int segment_size,
    cudaStream_t stream, int op, int dtype_id)
{
    // CUB internally uses int for offsets...
    // This iterates over [0, segment_size, 2*segment_size, 3*segment_size, ...]
#ifndef CUPY_USE_HIP
    CountingInputIterator<int> count_itr(0);
#else
    rocprim::counting_iterator<int> count_itr(0);
#endif
    _arange scaling(segment_size);
    seg_offset_itr itr(count_itr, scaling);

    switch(op) {
    case CUPY_CUB_SUM:
        return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
                   workspace, workspace_size, x, y, num_segments, itr, stream);
    case CUPY_CUB_MIN:
        return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
                   workspace, workspace_size, x, y, num_segments, itr, stream);
    case CUPY_CUB_MAX:
        return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
                   workspace, workspace_size, x, y, num_segments, itr, stream);
    case CUPY_CUB_PROD:
        return dtype_dispatcher(dtype_id, _cub_segmented_reduce_prod(),
                   workspace, workspace_size, x, y, num_segments, itr, stream);
    default:
        throw std::runtime_error("Unsupported operation");
    }
}

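// Worked example (added commentary): with num_segments = 3 and
// segment_size = 4, itr yields offsets 0, 4, 8, 12, so passing
// (itr, itr+1) as the begin/end offset iterators makes CUB reduce the
// segments [0,4), [4,8), and [8,12) independently.
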
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
    int num_segments, int segment_size,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t workspace_size = 0;
    cub_device_segmented_reduce(NULL, workspace_size, x, y,
                                num_segments, segment_size, stream,
                                op, dtype_id);
    return workspace_size;
}

/*--------- device spmv (sparse-matrix dense-vector multiply) ---------*/

void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
    void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
    int num_cols, int num_nonzeros, cudaStream_t stream,
    int dtype_id)
{
#ifndef CUPY_USE_HIP
    return dtype_dispatcher(dtype_id, _cub_device_spmv(),
               workspace, workspace_size, values, row_offsets,
               column_indices, x, y, num_rows, num_cols,
               num_nonzeros, stream);
#endif
}

size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
    void* column_indices, void* x, void* y, int num_rows, int num_cols,
    int num_nonzeros, cudaStream_t stream, int dtype_id)
{
    size_t workspace_size = 0;
#ifndef CUPY_USE_HIP
    cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
                    x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
#endif
    return workspace_size;
}

/* -------- device scan -------- */

void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
    int num_items, cudaStream_t stream, int op, int dtype_id)
{
    switch(op) {
    case CUPY_CUB_CUMSUM:
        return dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
                   workspace, workspace_size, x, y, num_items, stream);
    case CUPY_CUB_CUMPROD:
        return dtype_dispatcher(dtype_id, _cub_inclusive_product(),
                   workspace, workspace_size, x, y, num_items, stream);
    default:
        throw std::runtime_error("Unsupported operation");
    }
}

size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t workspace_size = 0;
    cub_device_scan(NULL, workspace_size, x, y, num_items, stream,
                    op, dtype_id);
    return workspace_size;
}

/* -------- device histogram -------- */

void cub_device_histogram_range(void* workspace, size_t& workspace_size, void* x, void* y,
    int n_bins, void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    // TODO(leofang): support complex
    if (dtype_id == CUPY_TYPE_COMPLEX64 || dtype_id == CUPY_TYPE_COMPLEX128) {
        throw std::runtime_error("complex dtype is not yet supported");
    }

    // TODO(leofang): n_samples is of type size_t, but if it's < 2^31 we cast it to int later
    return dtype_dispatcher(dtype_id, _cub_histogram_range(),
               workspace, workspace_size, x, y, n_bins, bins, n_samples, stream);
}

size_t cub_device_histogram_range_get_workspace_size(void* x, void* y, int n_bins,
    void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    size_t workspace_size = 0;
    cub_device_histogram_range(NULL, workspace_size, x, y, n_bins, bins, n_samples,
                               stream, dtype_id);
    return workspace_size;
}

void cub_device_histogram_even(void* workspace, size_t& workspace_size, void* x, void* y,
    int n_bins, int lower, int upper, size_t n_samples, cudaStream_t stream, int dtype_id)
{
#ifndef CUPY_USE_HIP
    return dtype_dispatcher(dtype_id, _cub_histogram_even(),
               workspace, workspace_size, x, y, n_bins, lower, upper, n_samples, stream);
#endif
}

size_t cub_device_histogram_even_get_workspace_size(void* x, void* y, int n_bins,
    int lower, int upper, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    size_t workspace_size = 0;
    cub_device_histogram_even(NULL, workspace_size, x, y, n_bins, lower, upper, n_samples,
                              stream, dtype_id);
    return workspace_size;
}
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufft.h
ADDED
@@ -0,0 +1,324 @@
#ifndef INCLUDE_GUARD_CUPY_CUFFT_H
#define INCLUDE_GUARD_CUPY_CUFFT_H

/*
 * Note: this file should *not* be split into 3 and moved under cupy_backends/,
 * because we need to copy this header to sdist and use it at runtime for cuFFT
 * callbacks.
 */

#if !defined(CUPY_NO_CUDA) && !defined(CUPY_USE_HIP)
#include <cufft.h>
#include <cufftXt.h>

#elif defined(CUPY_USE_HIP)
#include <hipfft.h>

extern "C" {

typedef hipfftComplex cufftComplex;
typedef hipfftDoubleComplex cufftDoubleComplex;
typedef hipfftReal cufftReal;
typedef hipfftDoubleReal cufftDoubleReal;

typedef hipfftResult_t cufftResult_t;
typedef hipfftHandle cufftHandle;
typedef hipfftType_t cufftType_t;
typedef hipStream_t cudaStream_t;

// cuFFT Helper Function
cufftResult_t cufftCreate(cufftHandle* plan) {
    return hipfftCreate(plan);
}

cufftResult_t cufftDestroy(cufftHandle plan) {
    return hipfftDestroy(plan);
}

cufftResult_t cufftSetAutoAllocation(cufftHandle plan, int autoAllocate) {
    return hipfftSetAutoAllocation(plan, autoAllocate);
}

cufftResult_t cufftSetWorkArea(cufftHandle plan, void *workArea) {
    return hipfftSetWorkArea(plan, workArea);
}

// cuFFT Stream Function
cufftResult_t cufftSetStream(cufftHandle plan, cudaStream_t stream) {
    return hipfftSetStream(plan, stream);
}

// cuFFT Plan Functions
cufftResult_t cufftMakePlan1d(cufftHandle plan,
                              int nx,
                              cufftType_t type,
                              int batch,
                              size_t *workSize) {
    return hipfftMakePlan1d(plan, nx, type, batch, workSize);
}

cufftResult_t cufftMakePlanMany(cufftHandle plan,
                                int rank,
                                int *n,
                                int *inembed, int istride, int idist,
                                int *onembed, int ostride, int odist,
                                cufftType_t type,
                                int batch,
                                size_t *workSize) {
    return hipfftMakePlanMany(plan, rank, n,
                              inembed, istride, idist,
                              onembed, ostride, odist,
                              type, batch, workSize);
}

// cuFFT Exec Function
cufftResult_t cufftExecC2C(cufftHandle plan,
                           cufftComplex *idata,
                           cufftComplex *odata,
                           int direction) {
    return hipfftExecC2C(plan, idata, odata, direction);
}

cufftResult_t cufftExecR2C(cufftHandle plan,
                           cufftReal *idata,
                           cufftComplex *odata) {
    return hipfftExecR2C(plan, idata, odata);
}

cufftResult_t cufftExecC2R(cufftHandle plan,
                           cufftComplex *idata,
                           cufftReal *odata) {
    return hipfftExecC2R(plan, idata, odata);
}

cufftResult_t cufftExecZ2Z(cufftHandle plan,
                           cufftDoubleComplex *idata,
                           cufftDoubleComplex *odata,
                           int direction) {
    return hipfftExecZ2Z(plan, idata, odata, direction);
}

cufftResult_t cufftExecD2Z(cufftHandle plan,
                           cufftDoubleReal *idata,
                           cufftDoubleComplex *odata) {
    return hipfftExecD2Z(plan, idata, odata);
}

cufftResult_t cufftExecZ2D(cufftHandle plan,
                           cufftDoubleComplex *idata,
                           cufftDoubleReal *odata) {
    return hipfftExecZ2D(plan, idata, odata);
}

// cuFFT Version
cufftResult_t cufftGetVersion(int *version) {
    return hipfftGetVersion(version);
}

// TODO(leofang): move this header to cupy_backends/ and include hip/cupy_hip_common.h
typedef enum {} cudaDataType;

// cufftXt functions
cufftResult_t cufftXtSetGPUs(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtSetWorkArea(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtMemcpy(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtMakePlanMany(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtExec(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtExecDescriptorC2C(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

cufftResult_t cufftXtExecDescriptorZ2Z(...) {
    return HIPFFT_NOT_IMPLEMENTED;
}

} // extern "C"

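// Illustrative sketch (added commentary, not part of the original header):
// the shims above let CuPy's cuFFT bindings run unmodified on HIP. A typical
// call sequence flowing through them is:
//   cufftHandle plan;  size_t work_size;
//   cufftCreate(&plan);
//   cufftSetAutoAllocation(plan, 0);      // caller manages the work area
//   cufftMakePlan1d(plan, nx, type, batch, &work_size);
//   cufftSetWorkArea(plan, work_area);    // device buffer of work_size bytes
//   cufftExecC2C(plan, idata, odata, direction);
//   cufftDestroy(plan);
// where nx, type, batch, work_area, idata, odata, and direction are
// placeholders supplied by the caller.
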
#else // defined(CUPY_NO_CUDA)

#include "../../cupy_backends/stub/cupy_cuda_common.h"
#include "../../cupy_backends/stub/cupy_cuComplex.h"

extern "C" {

typedef float cufftReal;
typedef double cufftDoubleReal;
typedef cuComplex cufftComplex;
typedef cuDoubleComplex cufftDoubleComplex;

typedef enum {
    CUFFT_SUCCESS = 0,
    CUFFT_INVALID_PLAN = 1,
    CUFFT_ALLOC_FAILED = 2,
    CUFFT_INVALID_TYPE = 3,
    CUFFT_INVALID_VALUE = 4,
    CUFFT_INTERNAL_ERROR = 5,
    CUFFT_EXEC_FAILED = 6,
    CUFFT_SETUP_FAILED = 7,
    CUFFT_INVALID_SIZE = 8,
    CUFFT_UNALIGNED_DATA = 9,
    CUFFT_INCOMPLETE_PARAMETER_LIST = 10,
    CUFFT_INVALID_DEVICE = 11,
    CUFFT_PARSE_ERROR = 12,
    CUFFT_NO_WORKSPACE = 13,
    CUFFT_NOT_IMPLEMENTED = 14,
    CUFFT_LICENSE_ERROR = 15,
    CUFFT_NOT_SUPPORTED = 16,
} cufftResult_t;

typedef int cufftHandle;

typedef enum {} cufftType_t;

// cuFFT Helper Function
cufftResult_t cufftCreate(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftDestroy(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftSetAutoAllocation(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftSetWorkArea(...) {
    return CUFFT_SUCCESS;
}

// cuFFT Stream Function
cufftResult_t cufftSetStream(...) {
    return CUFFT_SUCCESS;
}

// cuFFT Plan Functions
cufftResult_t cufftMakePlan1d(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftMakePlanMany(...) {
    return CUFFT_SUCCESS;
}

// cuFFT Exec Function
cufftResult_t cufftExecC2C(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftExecR2C(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftExecC2R(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftExecZ2Z(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftExecD2Z(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftExecZ2D(...) {
    return CUFFT_SUCCESS;
}

// cuFFT Version
cufftResult_t cufftGetVersion(...) {
    return CUFFT_SUCCESS;
}

// cufftXt functions
cufftResult_t cufftXtSetGPUs(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtSetWorkArea(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtMemcpy(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtMakePlanMany(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtExec(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtExecDescriptorC2C(...) {
    return CUFFT_SUCCESS;
}

cufftResult_t cufftXtExecDescriptorZ2Z(...) {
    return CUFFT_SUCCESS;
}

} // extern "C"

#endif // #if !defined(CUPY_NO_CUDA) && !defined(CUPY_USE_HIP)

#if defined(CUPY_NO_CUDA) || defined(CUPY_USE_HIP)
// common stubs for both no-cuda and hip environments

extern "C" {
// cufftXt relevant data structs
typedef struct cudaXtDesc_t {
    int version;
    int nGPUs;
    int GPUs[64];
    void* data[64];
    size_t size[64];
    void* cudaXtState;
} cudaXtDesc;

typedef enum cufftXtSubFormat_t {
    CUFFT_XT_FORMAT_INPUT = 0x00,
    CUFFT_XT_FORMAT_OUTPUT = 0x01,
    CUFFT_XT_FORMAT_INPLACE = 0x02,
    CUFFT_XT_FORMAT_INPLACE_SHUFFLED = 0x03,
    CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED = 0x04,
    CUFFT_FORMAT_UNDEFINED = 0x05
} cufftXtSubFormat;

typedef struct cudaLibXtDesc_t{
    int version;
    cudaXtDesc *descriptor;
    int library; // libFormat is an undocumented type, so use int here
    int subFormat;
    void *libDescriptor;
} cudaLibXtDesc;

typedef enum cufftXtCopyType_t {
    CUFFT_COPY_HOST_TO_DEVICE = 0x00,
    CUFFT_COPY_DEVICE_TO_HOST = 0x01,
    CUFFT_COPY_DEVICE_TO_DEVICE = 0x02,
    CUFFT_COPY_UNDEFINED = 0x03
} cufftXtCopyType;

} // extern "C"

#endif // #if defined(CUPY_NO_CUDA) || defined(CUPY_USE_HIP)

#endif // INCLUDE_GUARD_CUPY_CUFFT_H
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufftXt.cu
ADDED
@@ -0,0 +1,68 @@
#include "cupy_cufftXt.h"


// this must define d_loadCallbackPtr
${dev_load_callback_ker}

// this must define d_storeCallbackPtr
${dev_store_callback_ker}

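// Note (added commentary): cuFFT requires a *host* copy of the device-side
// callback function pointer. Each case below therefore reads the pointer
// stored in the device symbol (d_loadCallbackPtr / d_storeCallbackPtr,
// defined by the templates substituted above) with cudaMemcpyFromSymbol
// before handing it to cufftXtSetCallback.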
cufftResult set_callback(cufftHandle plan, cufftXtCallbackType type, bool cb_load, void** callerInfo) {
    if (cb_load) {
        switch (type) {
            #ifdef HAS_LOAD_CALLBACK
            case CUFFT_CB_LD_COMPLEX: {
                cufftCallbackLoadC h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_COMPLEX_DOUBLE: {
                cufftCallbackLoadZ h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_REAL: {
                cufftCallbackLoadR h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_REAL_DOUBLE: {
                cufftCallbackLoadD h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            #endif // HAS_LOAD_CALLBACK
            default: {
                throw std::runtime_error("unrecognized callback");
            }
        }
    } else {
        switch (type) {
            #ifdef HAS_STORE_CALLBACK
            case CUFFT_CB_ST_COMPLEX: {
                cufftCallbackStoreC h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_COMPLEX_DOUBLE: {
                cufftCallbackStoreZ h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_REAL: {
                cufftCallbackStoreR h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_REAL_DOUBLE: {
                cufftCallbackStoreD h_ptr;
                cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            #endif // HAS_STORE_CALLBACK
            default: {
                throw std::runtime_error("unrecognized callback");
            }
        }
    }
}
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_cufftXt.h
ADDED
@@ -0,0 +1,10 @@
#include <cufftXt.h>
#include <cstdint>
#include <stdexcept>


extern "C" {

cufftResult set_callback(cufftHandle plan, cufftXtCallbackType cbType, bool cb_load, void** callerInfo=NULL);

}
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cupy_thrust.cu
ADDED
@@ -0,0 +1,526 @@
#include "cupy_thrust.h"
#include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#include <type_traits>
#if (__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with the constexpr function declarations
// introduced in CUDA 11.2: MSVC 15 does not fully support them, so we need the
// dummy constexpr macro provided by this header. Note that optional.h is only
// available starting with CUDA 10.1.
#include <thrust/optional.h>

#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif

#endif


#if CUPY_USE_HIP
typedef hipStream_t cudaStream_t;
namespace cuda {
using thrust::hip::par;
}
#else // #if CUPY_USE_HIP
namespace cuda {
using thrust::cuda::par;
}
#endif // #if CUPY_USE_HIP


extern "C" char *cupy_malloc(void *, size_t);
extern "C" int cupy_free(void *, char *);


class cupy_allocator {
private:
    void* memory;

public:
    typedef char value_type;

    cupy_allocator(void* memory) : memory(memory) {}

    char *allocate(size_t num_bytes) {
        return cupy_malloc(memory, num_bytes);
    }

    void deallocate(char *ptr, size_t n) {
        cupy_free(memory, ptr);
    }
};


/*
 * ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
 * We need a specialized operator< here in order to match the NumPy behavior:
 * "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
 * determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
 *
 * In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
 *   Real: [R, nan]
 *   Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
 * where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
 * it exists. Non-nan values are sorted as before."
 * Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
 */

#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
#define ENABLE_HALF
#endif

#if (__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
#define CONSTEXPR_FUNC THRUST_OPTIONAL_CPP11_CONSTEXPR
#else
#define CONSTEXPR_FUNC
#endif

#if (__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
#define CONSTEXPR_COMPARATOR THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#else
#define CONSTEXPR_COMPARATOR
#endif

#ifdef ENABLE_HALF
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
    return __hisnan(x);
#else
    return false;  // This will never be called on the host
#endif
}
#endif // ENABLE_HALF

template <typename T>
__host__ __device__ __forceinline__ CONSTEXPR_FUNC
static bool real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
    if (isnan(lhs)) {
        return false;
    } else if (isnan(rhs)) {
        return true;
    } else {
        return lhs < rhs;
    }
#else
    return false;  // This will never be executed on the host
#endif
}

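// Example (added commentary): real_less sorts NaNs to the back, matching
// numpy.sort -- a NaN on the left is never "less", while a NaN on the right
// makes any non-NaN value compare less:
//   real_less(1.0, NAN) == true,   real_less(NAN, 1.0) == false.
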
template <typename T>
__host__ __device__ __forceinline__ CONSTEXPR_FUNC
static bool tuple_less(const thrust::tuple<size_t, T>& lhs,
                       const thrust::tuple<size_t, T>& rhs) {
    const size_t& lhs_k = thrust::get<0>(lhs);
    const size_t& rhs_k = thrust::get<0>(rhs);
    const T& lhs_v = thrust::get<1>(lhs);
    const T& rhs_v = thrust::get<1>(rhs);

    // tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
    // which should be respected
    if (lhs_k < rhs_k) {
        return true;
    } else if (lhs_k == rhs_k) {
        // same key, compare values
        // note that we can't rely on native operator< due to NaN, so we rely on our custom comparison object
        return real_less(lhs_v, rhs_v);
    } else {
        return false;
    }
}

/*
 * ********** complex numbers **********
 * We need a custom comparator because we can't overload operator< for complex numbers...
 */

template <typename T>
__host__ __device__ __forceinline__ CONSTEXPR_FUNC
static bool complex_less(const T& lhs, const T& rhs) {
    const bool lhsRe = isnan(lhs.real());
    const bool lhsIm = isnan(lhs.imag());
    const bool rhsRe = isnan(rhs.real());
    const bool rhsIm = isnan(rhs.imag());

    // neither side has nan
    if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
        return lhs < rhs;
    }

    // one side has nan, and the other does not
    if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
        return true;
    }
    if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
        return false;
    }

    // pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
    if (lhsRe && !rhsRe) {
        return false;
    }
    if (!lhsRe && rhsRe) {
        return true;
    }
    if (lhsIm && !rhsIm) {
        return false;
    }
    if (!lhsIm && rhsIm) {
        return true;
    }

    // pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
    return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}

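// Example (added commentary): under complex_less, the extended NumPy sort
// order quoted above comes out as
//   (1 + 2j) < (1 + NaNj) < (NaN + 2j) < (NaN + NaNj)
// and two values with the same NaN placement fall back to comparing their
// non-NaN parts.
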
// Type function giving us the right comparison operator. We use a custom one for all the specializations below,
// but otherwise just default to thrust::less. We notably do not define a specialization for float and double, since
// thrust uses radix sort for them and sorts NaNs to the back.
template <typename T>
struct select_less {
    using type = thrust::less<T>;
};

// complex numbers

template <>
struct select_less<complex<float>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const complex<float>& lhs, const complex<float>& rhs) const {
            return complex_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<complex<double>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const complex<double>& lhs, const complex<double>& rhs) const {
            return complex_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<thrust::tuple<size_t, complex<float>>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const thrust::tuple<size_t, complex<float>>& lhs, const thrust::tuple<size_t, complex<float>>& rhs) const {
            return tuple_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<thrust::tuple<size_t, complex<double>>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const thrust::tuple<size_t, complex<double>>& lhs, const thrust::tuple<size_t, complex<double>>& rhs) const {
            return tuple_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<thrust::tuple<size_t, float>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const thrust::tuple<size_t, float>& lhs, const thrust::tuple<size_t, float>& rhs) const {
            return tuple_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<thrust::tuple<size_t, double>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const thrust::tuple<size_t, double>& lhs, const thrust::tuple<size_t, double>& rhs) const {
            return tuple_less(lhs, rhs);
        }
    };
};

// floating points

template <>
struct select_less<float> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (const float& lhs, const float& rhs) const {
            return real_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<double> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (const double& lhs, const double& rhs) const {
            return real_less(lhs, rhs);
        }
    };
};

#ifdef ENABLE_HALF
template <>
struct select_less<__half> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (const __half& lhs, const __half& rhs) const {
            return real_less(lhs, rhs);
        }
    };
};

template <>
struct select_less<thrust::tuple<size_t, __half>> {
    struct type {
        __host__ __device__ __forceinline__ CONSTEXPR_COMPARATOR
        bool operator() (
                const thrust::tuple<size_t, __half>& lhs, const thrust::tuple<size_t, __half>& rhs) const {
            return tuple_less(lhs, rhs);
        }
    };
};
#endif // ENABLE_HALF

/*
 * -------------------------------------------------- end of boilerplate --------------------------------------------------
 */


/*
 * sort
 */

struct _sort {
    template <typename T>
    __forceinline__ void operator()(void *data_start, size_t *keys_start,
                                    const std::vector<ptrdiff_t>& shape, intptr_t stream,
                                    void* memory) {
        size_t ndim = shape.size();
        ptrdiff_t size;
        thrust::device_ptr<T> dp_data_first, dp_data_last;
        thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
        cudaStream_t stream_ = (cudaStream_t)stream;
        cupy_allocator alloc(memory);

        // Compute the total size of the array.
        size = shape[0];
        for (size_t i = 1; i < ndim; ++i) {
            size *= shape[i];
        }

        dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
        dp_data_last  = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);

        if (ndim == 1) {
            // we use thrust::less directly to sort floating points, because then it can use radix sort, which happens to sort NaNs to the back
            using compare_op = std::conditional_t<std::is_floating_point<T>::value, thrust::less<T>, typename select_less<T>::type>;
            stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, compare_op{});
        } else {
            // Generate key indices.
            dp_keys_first = thrust::device_pointer_cast(keys_start);
            dp_keys_last  = thrust::device_pointer_cast(keys_start + size);
            transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
                      rocprim::make_counting_iterator<size_t>(0),
                      rocprim::make_counting_iterator<size_t>(size),
                      rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
                      thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator<size_t>(size),
                      thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
                      dp_keys_first,
                      thrust::divides<size_t>());

            stable_sort(
                cuda::par(alloc).on(stream_),
                make_zip_iterator(dp_keys_first, dp_data_first),
                make_zip_iterator(dp_keys_last, dp_data_last),
                typename select_less<thrust::tuple<size_t, T>>::type{});
        }
    }
};

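/*
 * Note (added commentary): for ndim > 1 the sort above is batched without a
 * dedicated segmented-sort primitive. Each element's linear index is divided
 * by the length of the last axis (thrust::divides), so every row gets a
 * distinct key, and one stable_sort over (key, value) zip pairs then orders
 * values only within their own row; tuple_less makes the key dominate the
 * comparison.
 */
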
/*
 * lexsort
 */

template <typename T>
class elem_less {
public:
    elem_less(const T *data):_data(data) {}
    __device__ __forceinline__ bool operator()(size_t i, size_t j) const {
        return typename select_less<T>::type{}(_data[i], _data[j]);
    }
private:
    const T *_data;
};

struct _lexsort {
    template <typename T>
    __forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
                                    size_t n, intptr_t stream, void *memory) {
        /* idx_start is the beginning of the output array where the indexes that
           would sort the data will be placed. The original contents of idx_start
           will be destroyed. */
        thrust::device_ptr<size_t> dp_first = thrust::device_pointer_cast(idx_start);
        thrust::device_ptr<size_t> dp_last  = thrust::device_pointer_cast(idx_start + n);
        cudaStream_t stream_ = (cudaStream_t)stream;
        cupy_allocator alloc(memory);
        sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
        for (size_t i = 0; i < k; ++i) {
            T *key_start = static_cast<T*>(keys_start) + i * n;
            stable_sort(
                cuda::par(alloc).on(stream_),
                dp_first,
                dp_last,
                elem_less<T>(key_start)
            );
        }
    }
};

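/*
 * Note (added commentary): _lexsort exploits the classic property that a
 * stable sort preserves the relative order established by earlier passes.
 * Sorting the index array once per key therefore makes the key sorted last
 * (i = k-1) the primary key, matching numpy.lexsort's convention that the
 * last key is the primary sort key.
 */
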
/*
 * argsort
 */

struct _argsort {
    template <typename T>
    __forceinline__ void operator()(size_t *idx_start, void *data_start,
                                    void *keys_start,
                                    const std::vector<ptrdiff_t>& shape,
                                    intptr_t stream, void *memory) {
        /* idx_start is the beginning of the output array where the indexes that
           would sort the data will be placed. The original contents of idx_start
           will be destroyed. */

        size_t ndim = shape.size();
        ptrdiff_t size;
        cudaStream_t stream_ = (cudaStream_t)stream;
        cupy_allocator alloc(memory);

        thrust::device_ptr<size_t> dp_idx_first, dp_idx_last;
        thrust::device_ptr<T> dp_data_first, dp_data_last;
        thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;

        // Compute the total size of the data array.
        size = shape[0];
        for (size_t i = 1; i < ndim; ++i) {
            size *= shape[i];
        }

        // Cast device pointers of data.
        dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
        dp_data_last  = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);

        // Generate an index sequence.
        dp_idx_first = thrust::device_pointer_cast(static_cast<size_t*>(idx_start));
        dp_idx_last  = thrust::device_pointer_cast(static_cast<size_t*>(idx_start) + size);
        transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
                  rocprim::make_counting_iterator<size_t>(0),
                  rocprim::make_counting_iterator<size_t>(size),
                  rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
                  thrust::make_counting_iterator<size_t>(0),
                  thrust::make_counting_iterator<size_t>(size),
                  thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
                  dp_idx_first,
                  thrust::modulus<size_t>());

        if (ndim == 1) {
            // we use thrust::less directly to sort floating points, because then it can use radix sort, which happens to sort NaNs to the back
            using compare_op = std::conditional_t<std::is_floating_point<T>::value, thrust::less<T>, typename select_less<T>::type>;
            // Sort the index sequence by data.
            stable_sort_by_key(cuda::par(alloc).on(stream_),
                               dp_data_first,
                               dp_data_last,
                               dp_idx_first,
                               compare_op{});
        } else {
            // Generate key indices.
            dp_keys_first = thrust::device_pointer_cast(static_cast<size_t*>(keys_start));
            dp_keys_last  = thrust::device_pointer_cast(static_cast<size_t*>(keys_start) + size);
            transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
                      rocprim::make_counting_iterator<size_t>(0),
                      rocprim::make_counting_iterator<size_t>(size),
                      rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
                      thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator<size_t>(size),
                      thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
                      dp_keys_first,
                      thrust::divides<size_t>());

            stable_sort_by_key(
                cuda::par(alloc).on(stream_),
                make_zip_iterator(dp_keys_first, dp_data_first),
                make_zip_iterator(dp_keys_last, dp_data_last),
                dp_idx_first,
                typename select_less<thrust::tuple<size_t, T>>::type{});
        }
    }
};


//
// APIs exposed to CuPy
//

/* -------- sort -------- */

void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
                 const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {

    _sort op;
    return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}


/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
                    size_t n, intptr_t stream, void *memory) {
|
| 512 |
+
|
| 513 |
+
_lexsort op;
|
| 514 |
+
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
|
| 515 |
+
}
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
/* -------- argsort -------- */
|
| 519 |
+
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
|
| 520 |
+
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
|
| 521 |
+
|
| 522 |
+
_argsort op;
|
| 523 |
+
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
|
| 524 |
+
stream, memory);
|
| 525 |
+
}
|
| 526 |
+
|
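To make the generated keys concrete: each flattened element index i is mapped to i % shape[ndim-1] (its offset within a last-axis row, which becomes the output index) and to i / shape[ndim-1] (its row id, the primary sort key), so the zipped stable sort orders every last-axis row independently. The _lexsort loop above uses the complementary trick of k successive stable sorts over one shared index array, so the key sorted last takes precedence. A hedged NumPy-level sketch of the argsort key trick (illustrative only; argsort_last_axis is not part of the diff):

    import numpy as np

    def argsort_last_axis(data):
        # CPU analogue of _argsort's ndim > 1 branch.
        flat = data.ravel()
        n = data.shape[-1]
        i = np.arange(flat.size)
        idx = i % n                      # thrust::modulus -> offset within a row
        row = i // n                     # thrust::divides -> row key
        order = np.lexsort((flat, row))  # stable; last key (row) is primary
        return idx[order].reshape(data.shape)

    a = np.array([[3, 1, 2], [9, 7, 8]])
    print(argsort_last_axis(a))          # [[1 2 0] [1 2 0]], matching np.argsort(a, axis=-1)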
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/cutensor.py
ADDED
@@ -0,0 +1,14 @@
+"""
+cuTENSOR Wrapper
+
+Use `cupy_backends.cuda.libs.cutensor` directly in CuPy codebase.
+"""
+
+available = True
+
+try:
+    from cupy_backends.cuda.libs.cutensor import *  # NOQA
+except ImportError as e:
+    available = False
+    from cupy._environment import _preload_warning
+    _preload_warning('cutensor', e)
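For reference, a minimal sketch of how the `available` flag exported by wrappers like this one is typically consumed (illustrative only; the same try/import/`available` pattern appears in the `nccl.py` wrapper further below):

    from cupy.cuda import cutensor

    if cutensor.available:
        # the star-import above re-exported the cupy_backends cuTENSOR symbols
        print('cuTENSOR backend loaded')
    else:
        # the import failed; a preload warning was already emitted
        print('continuing without cuTENSOR')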
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from cupy.cuda.memory_hooks import debug_print  # NOQA
+from cupy.cuda.memory_hooks import line_profile  # NOQA
+
+# import class and function
+from cupy.cuda.memory_hooks.debug_print import DebugPrintHook  # NOQA
+from cupy.cuda.memory_hooks.line_profile import LineProfileHook  # NOQA
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (421 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/debug_print.cpython-310.pyc
ADDED
Binary file (3.36 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/__pycache__/line_profile.cpython-310.pyc
ADDED
Binary file (6.67 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/debug_print.py
ADDED
@@ -0,0 +1,78 @@
+import sys
+
+from cupy.cuda import memory_hook
+
+
+class DebugPrintHook(memory_hook.MemoryHook):
+    """Memory hook that prints debug information.
+
+    This memory hook outputs the debug information of input arguments of
+    ``malloc`` and ``free`` methods involved in the hooked functions
+    at postprocessing time (that is, just after each method is called).
+
+    Example:
+        The basic usage is to use it with ``with`` statement.
+
+        Code example::
+
+            >>> import cupy
+            >>> from cupy.cuda import memory_hooks
+            >>>
+            >>> cupy.cuda.set_allocator(cupy.cuda.MemoryPool().malloc)
+            >>> with memory_hooks.DebugPrintHook():
+            ...     x = cupy.array([1, 2, 3])
+            ...     del x  # doctest:+SKIP
+
+        Output example::
+
+            {"hook":"alloc","device_id":0,"mem_size":512,"mem_ptr":150496608256}
+            {"hook":"malloc","device_id":0,"size":24,"mem_size":512,"mem_ptr":150496608256,"pmem_id":"0x7f39200c5278"}
+            {"hook":"free","device_id":0,"mem_size":512,"mem_ptr":150496608256,"pmem_id":"0x7f39200c5278"}
+
+        where the output format is JSONL (JSON Lines) and
+        ``hook`` is the name of hook point, and
+        ``device_id`` is the CUDA Device ID, and
+        ``size`` is the requested memory size to allocate, and
+        ``mem_size`` is the rounded memory size to be allocated, and
+        ``mem_ptr`` is the memory pointer, and
+        ``pmem_id`` is the pooled memory object ID.
+
+    Attributes:
+        file: Output file_like object that redirect to.
+        flush: If ``True``, this hook forcibly flushes the text stream
+            at the end of print. The default is ``True``.
+
+    """
+
+    name = 'DebugPrintHook'
+
+    def __init__(self, file=sys.stdout, flush=True):
+        self.file = file
+        self.flush = flush
+
+    def _print(self, msg):
+        self.file.write(msg)
+        self.file.write('\n')
+        if self.flush:
+            self.file.flush()
+
+    def alloc_postprocess(self, **kwargs):
+        msg = '{"hook":"%s","device_id":%d,' \
+              '"mem_size":%d,"mem_ptr":%d}'
+        msg %= ('alloc', kwargs['device_id'],
+                kwargs['mem_size'], kwargs['mem_ptr'])
+        self._print(msg)
+
+    def malloc_postprocess(self, **kwargs):
+        msg = '{"hook":"%s","device_id":%d,"size":%d,' \
+              '"mem_size":%d,"mem_ptr":%d,"pmem_id":"%s"}'
+        msg %= ('malloc', kwargs['device_id'], kwargs['size'],
+                kwargs['mem_size'], kwargs['mem_ptr'], hex(kwargs['pmem_id']))
+        self._print(msg)
+
+    def free_postprocess(self, **kwargs):
+        msg = '{"hook":"%s","device_id":%d,' \
+              '"mem_size":%d,"mem_ptr":%d,"pmem_id":"%s"}'
+        msg %= ('free', kwargs['device_id'],
+                kwargs['mem_size'], kwargs['mem_ptr'], hex(kwargs['pmem_id']))
+        self._print(msg)
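Since `DebugPrintHook` is just one `MemoryHook` subclass, here is a hedged sketch of a custom hook built on the same base class (the `CountingHook` name and its fields are hypothetical, not part of the diff; it relies only on the `malloc_postprocess` kwargs shown above):

    from cupy.cuda import memory_hook

    class CountingHook(memory_hook.MemoryHook):
        # Tallies pool mallocs instead of printing them.
        name = 'CountingHook'

        def __init__(self):
            self.count = 0
            self.requested_bytes = 0

        def malloc_postprocess(self, **kwargs):
            self.count += 1
            self.requested_bytes += kwargs['size']  # same kwarg as in DebugPrintHook

    # usage mirrors the DebugPrintHook docstring:
    h = CountingHook()
    with h:
        pass  # CuPy pool allocations here are tallied
    print(h.count, h.requested_bytes)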
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/memory_hooks/line_profile.py
ADDED
@@ -0,0 +1,171 @@
+from os import path
+import sys
+import traceback
+
+from cupy.cuda import memory_hook
+
+
+class LineProfileHook(memory_hook.MemoryHook):
+    """Code line CuPy memory profiler.
+
+    This profiler shows line-by-line GPU memory consumption using traceback
+    module. But, note that it can trace only CPython level, no Cython level.
+    ref. https://github.com/cython/cython/issues/1755
+
+    Example:
+        Code example::
+
+            from cupy.cuda import memory_hooks
+            hook = memory_hooks.LineProfileHook()
+            with hook:
+                # some CuPy codes
+            hook.print_report()
+
+        Output example::
+
+            _root (4.00KB, 4.00KB)
+              lib/python3.6/unittest/__main__.py:18:<module> (4.00KB, 4.00KB)
+                lib/python3.6/unittest/main.py:255:runTests (4.00KB, 4.00KB)
+                  tests/cupy_tests/test.py:37:test (1.00KB, 1.00KB)
+                  tests/cupy_tests/test.py:38:test (1.00KB, 1.00KB)
+                  tests/cupy_tests/test.py:39:test (2.00KB, 2.00KB)
+
+        Each line shows::
+
+            {filename}:{lineno}:{func_name} ({used_bytes}, {acquired_bytes})
+
+        where *used_bytes* is the memory bytes used from CuPy memory pool, and
+        *acquired_bytes* is the actual memory bytes the CuPy memory pool
+        acquired from GPU device.
+        *_root* is a root node of the stack trace to show total memory usage.
+
+    Args:
+        max_depth (int): maximum depth to follow stack traces.
+            Default is 0 (no limit).
+    """
+
+    name = 'LineProfileHook'
+
+    def __init__(self, max_depth=0):
+        self._memory_frames = {}
+        self._root = MemoryFrame(None, None)
+        self._filename = path.abspath(__file__)
+        self._max_depth = max_depth
+
+    # callback
+    def malloc_preprocess(self, device_id, size, mem_size):
+        self._create_frame_tree(used_bytes=mem_size)
+
+    # callback
+    def alloc_preprocess(self, device_id, mem_size):
+        self._create_frame_tree(acquired_bytes=mem_size)
+
+    def _create_frame_tree(self, used_bytes=0, acquired_bytes=0):
+        self._root.used_bytes += used_bytes
+        self._root.acquired_bytes += acquired_bytes
+        parent = self._root
+        for depth, stackframe in enumerate(self._extract_stackframes()):
+            if 0 < self._max_depth <= depth + 1:
+                break
+            memory_frame = self._add_frame(parent, stackframe)
+            memory_frame.used_bytes += used_bytes
+            memory_frame.acquired_bytes += acquired_bytes
+            parent = memory_frame
+
+    def _extract_stackframes(self):
+        stackframes = traceback.extract_stack()
+        stackframes = [StackFrame(st) for st in stackframes]
+        stackframes = [
+            st for st in stackframes if st.filename != self._filename]
+        return stackframes
+
+    def _key_frame(self, parent, stackframe):
+        return (parent,
+                stackframe.filename,
+                stackframe.lineno,
+                stackframe.name)
+
+    def _add_frame(self, parent, stackframe):
+        key = self._key_frame(parent, stackframe)
+        if key in self._memory_frames:
+            memory_frame = self._memory_frames[key]
+        else:
+            memory_frame = MemoryFrame(parent, stackframe)
+            self._memory_frames[key] = memory_frame
+        return memory_frame
+
+    def print_report(self, file=sys.stdout):
+        """Prints a report of line memory profiling."""
+        line = '_root (%s, %s)\n' % self._root.humanized_bytes()
+        file.write(line)
+        for child in self._root.children:
+            self._print_frame(child, depth=1, file=file)
+        file.flush()
+
+    def _print_frame(self, memory_frame, depth=0, file=sys.stdout):
+        indent = ' ' * (depth * 2)
+        st = memory_frame.stackframe
+        used_bytes, acquired_bytes = memory_frame.humanized_bytes()
+        line = '%s%s:%s:%s (%s, %s)\n' % (
+            indent, st.filename, st.lineno, st.name,
+            used_bytes, acquired_bytes)
+        file.write(line)
+        for child in memory_frame.children:
+            self._print_frame(child, depth=depth + 1, file=file)
+
+
+class StackFrame(object):
+    """Compatibility layer for outputs of traceback.extract_stack().
+
+    Attributes:
+        filename (string): filename
+        lineno (int): line number
+        name (string): function name
+    """
+
+    def __init__(self, obj):
+        if isinstance(obj, tuple):  # < 3.5
+            self.filename = obj[0]
+            self.lineno = obj[1]
+            self.name = obj[2]
+        else:  # >= 3.5 FrameSummary
+            self.filename = obj.filename
+            self.lineno = obj.lineno
+            self.name = obj.name
+
+
+class MemoryFrame(object):
+    """A single stack frame along with sum of memory usage at the frame.
+
+    Attributes:
+        stackframe (FrameSummary): stackframe from traceback.extract_stack().
+        parent (MemoryFrame): parent frame, that is, caller.
+        children (list of MemoryFrame): child frames, that is, callees.
+        used_bytes (int): memory bytes that users used from CuPy memory pool.
+        acquired_bytes (int): memory bytes that CuPy memory pool acquired
+            from GPU device.
+    """
+
+    def __init__(self, parent, stackframe):
+        self.stackframe = stackframe
+        self.children = []
+        self._set_parent(parent)
+        self.used_bytes = 0
+        self.acquired_bytes = 0
+
+    def humanized_bytes(self):
+        used_bytes = self._humanized_size(self.used_bytes)
+        acquired_bytes = self._humanized_size(self.acquired_bytes)
+        return (used_bytes, acquired_bytes)
+
+    def _set_parent(self, parent):
+        if parent and parent not in parent.children:
+            self.parent = parent
+            parent.children.append(self)
+
+    def _humanized_size(self, size):
+        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E']:
+            if size < 1024.0:
+                return '%3.2f%sB' % (size, unit)
+            size /= 1024.0
+        return '%.2f%sB' % (size, 'Z')
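A hedged usage sketch of `LineProfileHook` with a depth limit (mirrors the docstring above; the array size is illustrative and a CUDA device is assumed):

    import cupy
    from cupy.cuda import memory_hooks

    hook = memory_hooks.LineProfileHook(max_depth=4)  # follow at most 4 stack levels
    with hook:
        x = cupy.zeros((1024,), dtype=cupy.float32)   # pool malloc recorded here
    hook.print_report()  # prints _root plus one indented line per call site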
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/nccl.py
ADDED
@@ -0,0 +1,17 @@
+"""
+NCCL Wrapper
+
+Use `cupy_backends.cuda.libs.nccl` directly in CuPy codebase.
+"""
+
+from cupy import _environment
+
+
+available = True
+
+try:
+    _environment._preload_library('nccl')
+    from cupy_backends.cuda.libs.nccl import *  # NOQA
+except ImportError as e:
+    available = False
+    _environment._preload_warning('nccl', e)
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/nvtx.py
ADDED
@@ -0,0 +1 @@
+from cupy_backends.cuda.libs.nvtx import *  # NOQA
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/profiler.py
ADDED
@@ -0,0 +1,3 @@
+# For backward compatibility only.
+from cupy_backends.cuda.api.runtime import profilerStart as start  # noqa
+from cupy_backends.cuda.api.runtime import profilerStop as stop  # noqa
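A minimal sketch of using these aliases to restrict capture to a region of interest (assumes an attached profiler such as nvprof or Nsight Systems; the workload is illustrative):

    import cupy
    from cupy.cuda import profiler

    a = cupy.random.rand(1024, 1024)
    profiler.start()                     # alias of runtime.profilerStart
    b = a @ a                            # only this region is captured
    cupy.cuda.Stream.null.synchronize()  # flush async work before stopping
    profiler.stop()                      # alias of runtime.profilerStop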
infer_4_30_0/lib/python3.10/site-packages/cupy/cuda/runtime.py
ADDED
@@ -0,0 +1 @@
+from cupy_backends.cuda.api.runtime import *  # NOQA
infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__init__.py
ADDED
@@ -0,0 +1,19 @@
+from cupy.fft._fft import fft  # NOQA
+from cupy.fft._fft import fft2  # NOQA
+from cupy.fft._fft import fftfreq  # NOQA
+from cupy.fft._fft import fftn  # NOQA
+from cupy.fft._fft import fftshift  # NOQA
+from cupy.fft._fft import hfft  # NOQA
+from cupy.fft._fft import ifft  # NOQA
+from cupy.fft._fft import ifft2  # NOQA
+from cupy.fft._fft import ifftn  # NOQA
+from cupy.fft._fft import ifftshift  # NOQA
+from cupy.fft._fft import ihfft  # NOQA
+from cupy.fft._fft import irfft  # NOQA
+from cupy.fft._fft import irfft2  # NOQA
+from cupy.fft._fft import irfftn  # NOQA
+from cupy.fft._fft import rfft  # NOQA
+from cupy.fft._fft import rfft2  # NOQA
+from cupy.fft._fft import rfftfreq  # NOQA
+from cupy.fft._fft import rfftn  # NOQA
+from cupy.fft import config  # NOQA
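A minimal round-trip sketch of the re-exported FFT API (assumes a CUDA device; the size and tolerance are illustrative):

    import cupy

    x = cupy.random.rand(8).astype(cupy.complex64)
    X = cupy.fft.fft(x)    # forward transform, backed by cuFFT
    y = cupy.fft.ifft(X)   # inverse transform
    cupy.testing.assert_allclose(x, y, rtol=1e-4)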
infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (731 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/_fft.cpython-310.pyc
ADDED
Binary file (30.4 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/fft/__pycache__/config.cpython-310.pyc
ADDED
Binary file (2.27 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/fft/config.py
ADDED
@@ -0,0 +1,61 @@
+from cupy import _util
+
+# expose cache handles to this module
+from cupy.fft._cache import get_plan_cache  # NOQA
+from cupy.fft._cache import clear_plan_cache  # NOQA
+from cupy.fft._cache import get_plan_cache_size  # NOQA
+from cupy.fft._cache import set_plan_cache_size  # NOQA
+from cupy.fft._cache import get_plan_cache_max_memsize  # NOQA
+from cupy.fft._cache import set_plan_cache_max_memsize  # NOQA
+from cupy.fft._cache import show_plan_cache_info  # NOQA
+
+# on Linux, expose callback handles to this module
+import sys as _sys
+if _sys.platform.startswith('linux'):
+    from cupy.fft._callback import get_current_callback_manager  # NOQA
+    from cupy.fft._callback import set_cufft_callbacks  # NOQA
+else:
+    def get_current_callback_manager(*args, **kwargs):
+        return None
+
+    class set_cufft_callbacks:  # type: ignore
+        def __init__(self, *args, **kwargs):
+            raise RuntimeError('cuFFT callback is only available on Linux')
+
+
+enable_nd_planning = True
+use_multi_gpus = False
+_devices = None
+
+
+def set_cufft_gpus(gpus):
+    '''Set the GPUs to be used in multi-GPU FFT.
+
+    Args:
+        gpus (int or list of int): The number of GPUs or a list of GPUs
+            to be used. For the former case, the first ``gpus`` GPUs
+            will be used.
+
+    .. warning::
+        This API is currently experimental and may be changed in the future
+        version.
+
+    .. seealso:: `Multiple GPU cuFFT Transforms`_
+
+    .. _Multiple GPU cuFFT Transforms:
+        https://docs.nvidia.com/cuda/cufft/index.html#multiple-GPU-cufft-transforms
+    '''
+    _util.experimental('cupy.fft.config.set_cufft_gpus')
+    global _devices
+
+    if isinstance(gpus, int):
+        devs = [i for i in range(gpus)]
+    elif isinstance(gpus, list):
+        devs = gpus
+    else:
+        raise ValueError("gpus must be an int or a list of int.")
+    if len(devs) <= 1:
+        raise ValueError("Must use at least 2 GPUs.")
+
+    # make it hashable
+    _devices = tuple(devs)
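A hedged sketch of the experimental multi-GPU path this module configures (requires at least two GPUs, per the ValueError above; pairing `use_multi_gpus` with `set_cufft_gpus` follows CuPy's documented usage, and the transform size is illustrative):

    import cupy
    from cupy.fft import config

    config.use_multi_gpus = True  # opt in to multi-GPU FFT
    config.set_cufft_gpus(2)      # use the first two devices, i.e. (0, 1)
    # an explicit list works too: config.set_cufft_gpus([0, 1])

    x = cupy.random.rand(1 << 20).astype(cupy.complex64)
    X = cupy.fft.fft(x)           # 1-D transform planned across the selected GPUs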
infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__init__.py
ADDED
@@ -0,0 +1 @@
+from cupy.lib import stride_tricks  # NOQA
infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (212 Bytes)
infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/_routines_poly.cpython-310.pyc
ADDED
Binary file (11.5 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/_shape_base.cpython-310.pyc
ADDED
Binary file (2.18 kB)
infer_4_30_0/lib/python3.10/site-packages/cupy/lib/__pycache__/stride_tricks.cpython-310.pyc
ADDED
Binary file (5.13 kB)