ZTWHHH committed on
Commit a6dc34a · verified · 1 parent: 882e559

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +2 -0
  2. evalkit_cambrian/share/terminfo/t/terminology +0 -0
  3. evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__init__.py +239 -0
  4. evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc +0 -0
  5. evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc +0 -0
  6. evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/override.py +1 -0
  7. evalkit_tf449/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc +0 -0
  8. evalkit_tf449/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 +3 -0
  9. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/__init__.py +0 -0
  10. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc +0 -0
  11. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/__init__.py +0 -0
  12. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc +0 -0
  13. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand.h +1077 -0
  14. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h +87 -0
  15. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h +253 -0
  16. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h +93 -0
  17. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h +1677 -0
  18. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h +697 -0
  19. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h +0 -0
  20. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32.h +210 -0
  21. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h +516 -0
  22. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h +386 -0
  23. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h +0 -0
  24. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h +840 -0
  25. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h +134 -0
  26. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_philox4x32_x.h +195 -0
  27. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h +763 -0
  28. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_precalc.h +0 -0
  29. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h +498 -0
  30. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/lib/__init__.py +0 -0
  31. evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  32. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/__pycache__/__init__.cpython-310.pyc +0 -0
  33. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/__init__.py +0 -0
  34. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc +0 -0
  35. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverDn.h +0 -0
  36. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverMg.h +318 -0
  37. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverRf.h +339 -0
  38. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverSp.h +923 -0
  39. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h +1107 -0
  40. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolver_common.h +261 -0
  41. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/__init__.py +0 -0
  42. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-310.pyc +0 -0
  43. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/__init__.py +0 -0
  44. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc +0 -0
  45. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse.h +0 -0
  46. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse_v2.h +54 -0
  47. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/lib/__init__.py +0 -0
  48. evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  49. evalkit_tf449/lib/python3.10/site-packages/pandas/__init__.py +367 -0
  50. evalkit_tf449/lib/python3.10/site-packages/pandas/_libs/__init__.py +27 -0
.gitattributes CHANGED
@@ -1189,3 +1189,5 @@ evalkit_cambrian/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/Deja
 evalkit_tf449/lib/python3.10/site-packages/fontTools/subset/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_tf449/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
 evalkit_cambrian/lib/python3.10/site-packages/matplotlib/__pycache__/figure.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ janusflow/bin/python filter=lfs diff=lfs merge=lfs -text
+ evalkit_tf449/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
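
The two added lines route janusflow/bin/python and the cuFFT shared library through the Git LFS filter, so only LFS pointers are committed for them. As a quick illustration (a hypothetical helper, not part of this commit), the LFS-tracked patterns can be read back out of a .gitattributes file like this one:

# Hypothetical helper (not part of the commit): list the patterns that a
# .gitattributes file routes through the Git LFS filter.
def lfs_patterns(gitattributes_path=".gitattributes"):
    patterns = []
    with open(gitattributes_path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            pattern, *attrs = line.split()
            if "filter=lfs" in attrs:
                patterns.append(pattern)
    return patterns

# After this commit, lfs_patterns() would include janusflow/bin/python and
# the nvidia/cufft/lib/libcufftw.so.11 path among its entries.
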
evalkit_cambrian/share/terminfo/t/terminology ADDED
Binary file (3.42 kB).
 
evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__init__.py ADDED
@@ -0,0 +1,239 @@
+ # don't import any costly modules
+ import os
+ import sys
+
+ report_url = (
+     "https://github.com/pypa/setuptools/issues/new?template=distutils-deprecation.yml"
+ )
+
+
+ def warn_distutils_present():
+     if 'distutils' not in sys.modules:
+         return
+     import warnings
+
+     warnings.warn(
+         "Distutils was imported before Setuptools, but importing Setuptools "
+         "also replaces the `distutils` module in `sys.modules`. This may lead "
+         "to undesirable behaviors or errors. To avoid these issues, avoid "
+         "using distutils directly, ensure that setuptools is installed in the "
+         "traditional way (e.g. not an editable install), and/or make sure "
+         "that setuptools is always imported before distutils."
+     )
+
+
+ def clear_distutils():
+     if 'distutils' not in sys.modules:
+         return
+     import warnings
+
+     warnings.warn(
+         "Setuptools is replacing distutils. Support for replacing "
+         "an already imported distutils is deprecated. In the future, "
+         "this condition will fail. "
+         f"Register concerns at {report_url}"
+     )
+     mods = [
+         name
+         for name in sys.modules
+         if name == "distutils" or name.startswith("distutils.")
+     ]
+     for name in mods:
+         del sys.modules[name]
+
+
+ def enabled():
+     """
+     Allow selection of distutils by environment variable.
+     """
+     which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
+     if which == 'stdlib':
+         import warnings
+
+         warnings.warn(
+             "Reliance on distutils from stdlib is deprecated. Users "
+             "must rely on setuptools to provide the distutils module. "
+             "Avoid importing distutils or import setuptools first, "
+             "and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. "
+             f"Register concerns at {report_url}"
+         )
+     return which == 'local'
+
+
+ def ensure_local_distutils():
+     import importlib
+
+     clear_distutils()
+
+     # With the DistutilsMetaFinder in place,
+     # perform an import to cause distutils to be
+     # loaded from setuptools._distutils. Ref #2906.
+     with shim():
+         importlib.import_module('distutils')
+
+     # check that submodules load as expected
+     core = importlib.import_module('distutils.core')
+     assert '_distutils' in core.__file__, core.__file__
+     assert 'setuptools._distutils.log' not in sys.modules
+
+
+ def do_override():
+     """
+     Ensure that the local copy of distutils is preferred over stdlib.
+
+     See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
+     for more motivation.
+     """
+     if enabled():
+         warn_distutils_present()
+         ensure_local_distutils()
+
+
+ class _TrivialRe:
+     def __init__(self, *patterns) -> None:
+         self._patterns = patterns
+
+     def match(self, string):
+         return all(pat in string for pat in self._patterns)
+
+
+ class DistutilsMetaFinder:
+     def find_spec(self, fullname, path, target=None):
+         # optimization: only consider top level modules and those
+         # found in the CPython test suite.
+         if path is not None and not fullname.startswith('test.'):
+             return None
+
+         method_name = 'spec_for_{fullname}'.format(**locals())
+         method = getattr(self, method_name, lambda: None)
+         return method()
+
+     def spec_for_distutils(self):
+         if self.is_cpython():
+             return None
+
+         import importlib
+         import importlib.abc
+         import importlib.util
+
+         try:
+             mod = importlib.import_module('setuptools._distutils')
+         except Exception:
+             # There are a couple of cases where setuptools._distutils
+             # may not be present:
+             # - An older Setuptools without a local distutils is
+             #   taking precedence. Ref #2957.
+             # - Path manipulation during sitecustomize removes
+             #   setuptools from the path but only after the hook
+             #   has been loaded. Ref #2980.
+             # In either case, fall back to stdlib behavior.
+             return None
+
+         class DistutilsLoader(importlib.abc.Loader):
+             def create_module(self, spec):
+                 mod.__name__ = 'distutils'
+                 return mod
+
+             def exec_module(self, module):
+                 pass
+
+         return importlib.util.spec_from_loader(
+             'distutils', DistutilsLoader(), origin=mod.__file__
+         )
+
+     @staticmethod
+     def is_cpython():
+         """
+         Suppress supplying distutils for CPython (build and tests).
+         Ref #2965 and #3007.
+         """
+         return os.path.isfile('pybuilddir.txt')
+
+     def spec_for_pip(self):
+         """
+         Ensure stdlib distutils when running under pip.
+         See pypa/pip#8761 for rationale.
+         """
+         if sys.version_info >= (3, 12) or self.pip_imported_during_build():
+             return
+         clear_distutils()
+         self.spec_for_distutils = lambda: None
+
+     @classmethod
+     def pip_imported_during_build(cls):
+         """
+         Detect if pip is being imported in a build script. Ref #2355.
+         """
+         import traceback
+
+         return any(
+             cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
+         )
+
+     @staticmethod
+     def frame_file_is_setup(frame):
+         """
+         Return True if the indicated frame suggests a setup.py file.
+         """
+         # some frames may not have __file__ (#2940)
+         return frame.f_globals.get('__file__', '').endswith('setup.py')
+
+     def spec_for_sensitive_tests(self):
+         """
+         Ensure stdlib distutils when running select tests under CPython.
+
+         python/cpython#91169
+         """
+         clear_distutils()
+         self.spec_for_distutils = lambda: None
+
+     sensitive_tests = (
+         [
+             'test.test_distutils',
+             'test.test_peg_generator',
+             'test.test_importlib',
+         ]
+         if sys.version_info < (3, 10)
+         else [
+             'test.test_distutils',
+         ]
+     )
+
+
+ for name in DistutilsMetaFinder.sensitive_tests:
+     setattr(
+         DistutilsMetaFinder,
+         f'spec_for_{name}',
+         DistutilsMetaFinder.spec_for_sensitive_tests,
+     )
+
+
+ DISTUTILS_FINDER = DistutilsMetaFinder()
+
+
+ def add_shim():
+     DISTUTILS_FINDER in sys.meta_path or insert_shim()
+
+
+ class shim:
+     def __enter__(self) -> None:
+         insert_shim()
+
+     def __exit__(self, exc: object, value: object, tb: object) -> None:
+         _remove_shim()
+
+
+ def insert_shim():
+     sys.meta_path.insert(0, DISTUTILS_FINDER)
+
+
+ def _remove_shim():
+     try:
+         sys.meta_path.remove(DISTUTILS_FINDER)
+     except ValueError:
+         pass
+
+
+ if sys.version_info < (3, 12):
+     # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
+     remove_shim = _remove_shim
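
The finder defined above only takes effect once it sits at the front of sys.meta_path. A minimal sketch of the redirect it performs (an illustration, not part of the commit; it assumes a fresh Python 3.10 interpreter with this setuptools layout importable and distutils not yet imported):

# Illustrative only: with the shim installed, `import distutils` resolves to
# setuptools' bundled copy instead of the standard-library package.
import importlib
import _distutils_hack

with _distutils_hack.shim():              # temporarily puts DISTUTILS_FINDER first
    core = importlib.import_module('distutils.core')

print(core.__file__)                      # expected to point into setuptools/_distutils/
assert '_distutils' in core.__file__      # the same check ensure_local_distutils() makes
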
evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (8.21 kB).
 
evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc ADDED
Binary file (200 Bytes).
 
evalkit_tf449/lib/python3.10/site-packages/_distutils_hack/override.py ADDED
@@ -0,0 +1 @@
+ __import__('_distutils_hack').do_override()
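
This one-liner exists so the redirect can be triggered by a bare import of _distutils_hack.override. At interpreter startup the hook is usually armed even earlier: site.py executes any line beginning with `import` in a *.pth file found in site-packages, so an activation file along the lines of the sketch below (an assumption for illustration; no such .pth file appears in this diff) installs the finder before user code runs.

# Hypothetical contents of a setuptools-style "distutils-precedence.pth"
# (assumption, not shown in this commit); executed by site.py at startup.
import os; os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') == 'local' and __import__('_distutils_hack').add_shim()
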
evalkit_tf449/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (165 Bytes).
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2307a5acfccc9b40f989384038218cfead564cd43633701d30c893047e744f44
+ size 974888
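
The three lines above are a Git LFS pointer: the real shared library lives in LFS storage and only its identity is committed here. A small check (the path is an assumption, not part of the commit) that a locally materialized file matches the pointer:

# Hypothetical verification: confirm a local libcufftw.so.11 matches the
# oid/size recorded in the LFS pointer above.
import hashlib
import os

path = "evalkit_tf449/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11"
expected_oid = "2307a5acfccc9b40f989384038218cfead564cd43633701d30c893047e744f44"
expected_size = 974888

digest = hashlib.sha256()
with open(path, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("local file matches its LFS pointer")
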
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes).
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand.h ADDED
@@ -0,0 +1,1077 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CURAND_H_)
51
+ #define CURAND_H_
52
+
53
+ /**
54
+ * \defgroup HOST Host API
55
+ *
56
+ * @{
57
+ */
58
+ #ifndef __CUDACC_RTC__
59
+ #include <cuda_runtime.h>
60
+ #endif
61
+
62
+ #ifndef CURANDAPI
63
+ #ifdef _WIN32
64
+ #define CURANDAPI __stdcall
65
+ #else
66
+ #define CURANDAPI
67
+ #endif
68
+ #endif
69
+
70
+ #if defined(__cplusplus)
71
+ extern "C" {
72
+ #endif /* __cplusplus */
73
+
74
+ #define CURAND_VER_MAJOR 10
75
+ #define CURAND_VER_MINOR 3
76
+ #define CURAND_VER_PATCH 5
77
+ #define CURAND_VER_BUILD 147
78
+ #define CURAND_VERSION (CURAND_VER_MAJOR * 1000 + \
79
+ CURAND_VER_MINOR * 100 + \
80
+ CURAND_VER_PATCH)
81
+ /* CURAND Host API datatypes */
82
+
83
+ /**
84
+ * @{
85
+ */
86
+
87
+ /**
88
+ * CURAND function call status types
89
+ */
90
+ enum curandStatus {
91
+ CURAND_STATUS_SUCCESS = 0, ///< No errors
92
+ CURAND_STATUS_VERSION_MISMATCH = 100, ///< Header file and linked library version do not match
93
+ CURAND_STATUS_NOT_INITIALIZED = 101, ///< Generator not initialized
94
+ CURAND_STATUS_ALLOCATION_FAILED = 102, ///< Memory allocation failed
95
+ CURAND_STATUS_TYPE_ERROR = 103, ///< Generator is wrong type
96
+ CURAND_STATUS_OUT_OF_RANGE = 104, ///< Argument out of range
97
+ CURAND_STATUS_LENGTH_NOT_MULTIPLE = 105, ///< Length requested is not a multiple of dimension
98
+ CURAND_STATUS_DOUBLE_PRECISION_REQUIRED = 106, ///< GPU does not have double precision required by MRG32k3a
99
+ CURAND_STATUS_LAUNCH_FAILURE = 201, ///< Kernel launch failure
100
+ CURAND_STATUS_PREEXISTING_FAILURE = 202, ///< Preexisting failure on library entry
101
+ CURAND_STATUS_INITIALIZATION_FAILED = 203, ///< Initialization of CUDA failed
102
+ CURAND_STATUS_ARCH_MISMATCH = 204, ///< Architecture mismatch, GPU does not support requested feature
103
+ CURAND_STATUS_INTERNAL_ERROR = 999 ///< Internal library error
104
+ };
105
+
106
+ /*
107
+ * CURAND function call status types
108
+ */
109
+ /** \cond UNHIDE_TYPEDEFS */
110
+ typedef enum curandStatus curandStatus_t;
111
+ /** \endcond */
112
+
113
+ /**
114
+ * CURAND generator types
115
+ */
116
+ enum curandRngType {
117
+ CURAND_RNG_TEST = 0,
118
+ CURAND_RNG_PSEUDO_DEFAULT = 100, ///< Default pseudorandom generator
119
+ CURAND_RNG_PSEUDO_XORWOW = 101, ///< XORWOW pseudorandom generator
120
+ CURAND_RNG_PSEUDO_MRG32K3A = 121, ///< MRG32k3a pseudorandom generator
121
+ CURAND_RNG_PSEUDO_MTGP32 = 141, ///< Mersenne Twister MTGP32 pseudorandom generator
122
+ CURAND_RNG_PSEUDO_MT19937 = 142, ///< Mersenne Twister MT19937 pseudorandom generator
123
+ CURAND_RNG_PSEUDO_PHILOX4_32_10 = 161, ///< PHILOX-4x32-10 pseudorandom generator
124
+ CURAND_RNG_QUASI_DEFAULT = 200, ///< Default quasirandom generator
125
+ CURAND_RNG_QUASI_SOBOL32 = 201, ///< Sobol32 quasirandom generator
126
+ CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 = 202, ///< Scrambled Sobol32 quasirandom generator
127
+ CURAND_RNG_QUASI_SOBOL64 = 203, ///< Sobol64 quasirandom generator
128
+ CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 = 204 ///< Scrambled Sobol64 quasirandom generator
129
+ };
130
+
131
+ /*
132
+ * CURAND generator types
133
+ */
134
+ /** \cond UNHIDE_TYPEDEFS */
135
+ typedef enum curandRngType curandRngType_t;
136
+ /** \endcond */
137
+
138
+ /**
139
+ * CURAND ordering of results in memory
140
+ */
141
+ enum curandOrdering {
142
+ CURAND_ORDERING_PSEUDO_BEST = 100, ///< Best ordering for pseudorandom results
143
+ CURAND_ORDERING_PSEUDO_DEFAULT = 101, ///< Specific default thread sequence for pseudorandom results, same as CURAND_ORDERING_PSEUDO_BEST
144
+ CURAND_ORDERING_PSEUDO_SEEDED = 102, ///< Specific seeding pattern for fast lower quality pseudorandom results
145
+ CURAND_ORDERING_PSEUDO_LEGACY = 103, ///< Specific legacy sequence for pseudorandom results, guaranteed to remain the same for all cuRAND releases
146
+ CURAND_ORDERING_PSEUDO_DYNAMIC = 104, ///< Specific ordering adjusted to the device it is being executed on, provides the best performance
147
+ CURAND_ORDERING_QUASI_DEFAULT = 201 ///< Specific n-dimensional ordering for quasirandom results
148
+ };
149
+
150
+ /*
151
+ * CURAND ordering of results in memory
152
+ */
153
+ /** \cond UNHIDE_TYPEDEFS */
154
+ typedef enum curandOrdering curandOrdering_t;
155
+ /** \endcond */
156
+
157
+ /**
158
+ * CURAND choice of direction vector set
159
+ */
160
+ enum curandDirectionVectorSet {
161
+ CURAND_DIRECTION_VECTORS_32_JOEKUO6 = 101, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
162
+ CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 = 102, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
163
+ CURAND_DIRECTION_VECTORS_64_JOEKUO6 = 103, ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions
164
+ CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 = 104 ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled
165
+ };
166
+
167
+ /*
168
+ * CURAND choice of direction vector set
169
+ */
170
+ /** \cond UNHIDE_TYPEDEFS */
171
+ typedef enum curandDirectionVectorSet curandDirectionVectorSet_t;
172
+ /** \endcond */
173
+
174
+ /**
175
+ * CURAND array of 32-bit direction vectors
176
+ */
177
+ /** \cond UNHIDE_TYPEDEFS */
178
+ typedef unsigned int curandDirectionVectors32_t[32];
179
+ /** \endcond */
180
+
181
+ /**
182
+ * CURAND array of 64-bit direction vectors
183
+ */
184
+ /** \cond UNHIDE_TYPEDEFS */
185
+ typedef unsigned long long curandDirectionVectors64_t[64];
186
+ /** \endcond **/
187
+
188
+ /**
189
+ * CURAND generator (opaque)
190
+ */
191
+ struct curandGenerator_st;
192
+
193
+ /**
194
+ * CURAND generator
195
+ */
196
+ /** \cond UNHIDE_TYPEDEFS */
197
+ typedef struct curandGenerator_st *curandGenerator_t;
198
+ /** \endcond */
199
+
200
+ /**
201
+ * CURAND distribution
202
+ */
203
+ /** \cond UNHIDE_TYPEDEFS */
204
+ typedef double curandDistribution_st;
205
+ typedef curandDistribution_st *curandDistribution_t;
206
+ typedef struct curandDistributionShift_st *curandDistributionShift_t;
207
+ /** \endcond */
208
+ /**
209
+ * CURAND distribution M2
210
+ */
211
+ /** \cond UNHIDE_TYPEDEFS */
212
+ typedef struct curandDistributionM2Shift_st *curandDistributionM2Shift_t;
213
+ typedef struct curandHistogramM2_st *curandHistogramM2_t;
214
+ typedef unsigned int curandHistogramM2K_st;
215
+ typedef curandHistogramM2K_st *curandHistogramM2K_t;
216
+ typedef curandDistribution_st curandHistogramM2V_st;
217
+ typedef curandHistogramM2V_st *curandHistogramM2V_t;
218
+
219
+ typedef struct curandDiscreteDistribution_st *curandDiscreteDistribution_t;
220
+ /** \endcond */
221
+
222
+ /*
223
+ * CURAND METHOD
224
+ */
225
+ /** \cond UNHIDE_ENUMS */
226
+ enum curandMethod {
227
+ CURAND_CHOOSE_BEST = 0, // choose best depends on args
228
+ CURAND_ITR = 1,
229
+ CURAND_KNUTH = 2,
230
+ CURAND_HITR = 3,
231
+ CURAND_M1 = 4,
232
+ CURAND_M2 = 5,
233
+ CURAND_BINARY_SEARCH = 6,
234
+ CURAND_DISCRETE_GAUSS = 7,
235
+ CURAND_REJECTION = 8,
236
+ CURAND_DEVICE_API = 9,
237
+ CURAND_FAST_REJECTION = 10,
238
+ CURAND_3RD = 11,
239
+ CURAND_DEFINITION = 12,
240
+ CURAND_POISSON = 13
241
+ };
242
+
243
+ typedef enum curandMethod curandMethod_t;
244
+ /** \endcond */
245
+
246
+
247
+ #ifndef __CUDACC_RTC__
248
+
249
+ /**
250
+ * @}
251
+ */
252
+
253
+ /**
254
+ * \brief Create new random number generator.
255
+ *
256
+ * Creates a new random number generator of type \p rng_type
257
+ * and returns it in \p *generator.
258
+ *
259
+ * Legal values for \p rng_type are:
260
+ * - CURAND_RNG_PSEUDO_DEFAULT
261
+ * - CURAND_RNG_PSEUDO_XORWOW
262
+ * - CURAND_RNG_PSEUDO_MRG32K3A
263
+ * - CURAND_RNG_PSEUDO_MTGP32
264
+ * - CURAND_RNG_PSEUDO_MT19937
265
+ * - CURAND_RNG_PSEUDO_PHILOX4_32_10
266
+ * - CURAND_RNG_QUASI_DEFAULT
267
+ * - CURAND_RNG_QUASI_SOBOL32
268
+ * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL32
269
+ * - CURAND_RNG_QUASI_SOBOL64
270
+ * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL64
271
+ *
272
+ * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
273
+ * is CURAND_RNG_PSEUDO_XORWOW. \n
274
+ * When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
275
+ * the type chosen is CURAND_RNG_QUASI_SOBOL32.
276
+ *
277
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
278
+ * - \p seed = 0
279
+ * - \p offset = 0
280
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
281
+ *
282
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
283
+ * - \p seed = 0
284
+ * - \p offset = 0
285
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
286
+ *
287
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
288
+ * - \p seed = 0
289
+ * - \p offset = 0
290
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
291
+ *
292
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
293
+ * - \p seed = 0
294
+ * - \p offset = 0
295
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
296
+ *
297
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
298
+ * - \p seed = 0
299
+ * - \p offset = 0
300
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
301
+ *
302
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
303
+ * - \p dimensions = 1
304
+ * - \p offset = 0
305
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
306
+ *
307
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
308
+ * - \p dimensions = 1
309
+ * - \p offset = 0
310
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
311
+ *
312
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are:
313
+ * - \p dimensions = 1
314
+ * - \p offset = 0
315
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
316
+ *
317
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
318
+ * - \p dimensions = 1
319
+ * - \p offset = 0
320
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
321
+ *
322
+ * \param generator - Pointer to generator
323
+ * \param rng_type - Type of generator to create
324
+ *
325
+ * \return
326
+ * - CURAND_STATUS_ALLOCATION_FAILED, if memory could not be allocated \n
327
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
328
+ * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
329
+ * dynamically linked library version \n
330
+ * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
331
+ * - CURAND_STATUS_SUCCESS if generator was created successfully \n
332
+ *
333
+ */
334
+ curandStatus_t CURANDAPI
335
+ curandCreateGenerator(curandGenerator_t *generator, curandRngType_t rng_type);
336
+
337
+ /**
338
+ * \brief Create new host CPU random number generator.
339
+ *
340
+ * Creates a new host CPU random number generator of type \p rng_type
341
+ * and returns it in \p *generator.
342
+ *
343
+ * Legal values for \p rng_type are:
344
+ * - CURAND_RNG_PSEUDO_DEFAULT
345
+ * - CURAND_RNG_PSEUDO_XORWOW
346
+ * - CURAND_RNG_PSEUDO_MRG32K3A
347
+ * - CURAND_RNG_PSEUDO_MTGP32
348
+ * - CURAND_RNG_PSEUDO_MT19937
349
+ * - CURAND_RNG_PSEUDO_PHILOX4_32_10
350
+ * - CURAND_RNG_QUASI_DEFAULT
351
+ * - CURAND_RNG_QUASI_SOBOL32
352
+ *
353
+ * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen
354
+ * is CURAND_RNG_PSEUDO_XORWOW. \n
355
+ * When \p rng_type is CURAND_RNG_QUASI_DEFAULT,
356
+ * the type chosen is CURAND_RNG_QUASI_SOBOL32.
357
+ *
358
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are:
359
+ * - \p seed = 0
360
+ * - \p offset = 0
361
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
362
+ *
363
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are:
364
+ * - \p seed = 0
365
+ * - \p offset = 0
366
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
367
+ *
368
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are:
369
+ * - \p seed = 0
370
+ * - \p offset = 0
371
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
372
+ *
373
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are:
374
+ * - \p seed = 0
375
+ * - \p offset = 0
376
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
377
+ *
378
+ * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are:
379
+ * - \p seed = 0
380
+ * - \p offset = 0
381
+ * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT
382
+ *
383
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are:
384
+ * - \p dimensions = 1
385
+ * - \p offset = 0
386
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
387
+ *
388
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are:
389
+ * - \p dimensions = 1
390
+ * - \p offset = 0
391
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
392
+ *
393
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are:
394
+ * - \p dimensions = 1
395
+ * - \p offset = 0
396
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
397
+ *
398
+ * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are:
399
+ * - \p dimensions = 1
400
+ * - \p offset = 0
401
+ * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT
402
+ *
403
+ * \param generator - Pointer to generator
404
+ * \param rng_type - Type of generator to create
405
+ *
406
+ * \return
407
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
408
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
409
+ * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the
410
+ * dynamically linked library version \n
411
+ * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n
412
+ * - CURAND_STATUS_SUCCESS if generator was created successfully \n
413
+ */
414
+ curandStatus_t CURANDAPI
415
+ curandCreateGeneratorHost(curandGenerator_t *generator, curandRngType_t rng_type);
416
+
417
+ /**
418
+ * \brief Destroy an existing generator.
419
+ *
420
+ * Destroy an existing generator and free all memory associated with its state.
421
+ *
422
+ * \param generator - Generator to destroy
423
+ *
424
+ * \return
425
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
426
+ * - CURAND_STATUS_SUCCESS if generator was destroyed successfully \n
427
+ */
428
+ curandStatus_t CURANDAPI
429
+ curandDestroyGenerator(curandGenerator_t generator);
430
+
431
+ /**
432
+ * \brief Return the version number of the library.
433
+ *
434
+ * Return in \p *version the version number of the dynamically linked CURAND
435
+ * library. The format is the same as CUDART_VERSION from the CUDA Runtime.
436
+ * The only supported configuration is CURAND version equal to CUDA Runtime
437
+ * version.
438
+ *
439
+ * \param version - CURAND library version
440
+ *
441
+ * \return
442
+ * - CURAND_STATUS_SUCCESS if the version number was successfully returned \n
443
+ */
444
+ curandStatus_t CURANDAPI
445
+ curandGetVersion(int *version);
446
+
447
+ /**
448
+ * \brief Return the value of the curand property.
449
+ *
450
+ * Return in \p *value the number for the property described by \p type of the
451
+ * dynamically linked CURAND library.
452
+ *
453
+ * \param type - CUDA library property
454
+ * \param value - integer value for the requested property
455
+ *
456
+ * \return
457
+ * - CURAND_STATUS_SUCCESS if the property value was successfully returned \n
458
+ * - CURAND_STATUS_OUT_OF_RANGE if the property type is not recognized \n
459
+ */
460
+ curandStatus_t CURANDAPI
461
+ curandGetProperty(libraryPropertyType type, int *value);
462
+
463
+
464
+ /**
465
+ * \brief Set the current stream for CURAND kernel launches.
466
+ *
467
+ * Set the current stream for CURAND kernel launches. All library functions
468
+ * will use this stream until set again.
469
+ *
470
+ * \param generator - Generator to modify
471
+ * \param stream - Stream to use or ::NULL for null stream
472
+ *
473
+ * \return
474
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
475
+ * - CURAND_STATUS_SUCCESS if stream was set successfully \n
476
+ */
477
+ curandStatus_t CURANDAPI
478
+ curandSetStream(curandGenerator_t generator, cudaStream_t stream);
479
+
480
+ /**
481
+ * \brief Set the seed value of the pseudo-random number generator.
482
+ *
483
+ * Set the seed value of the pseudorandom number generator.
484
+ * All values of seed are valid. Different seeds will produce different sequences.
485
+ * Different seeds will often not be statistically correlated with each other,
486
+ * but some pairs of seed values may generate sequences which are statistically correlated.
487
+ *
488
+ * \param generator - Generator to modify
489
+ * \param seed - Seed value
490
+ *
491
+ * \return
492
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
493
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a pseudorandom number generator \n
494
+ * - CURAND_STATUS_SUCCESS if generator seed was set successfully \n
495
+ */
496
+ curandStatus_t CURANDAPI
497
+ curandSetPseudoRandomGeneratorSeed(curandGenerator_t generator, unsigned long long seed);
498
+
499
+ /**
500
+ * \brief Set the absolute offset of the pseudo or quasirandom number generator.
501
+ *
502
+ * Set the absolute offset of the pseudo or quasirandom number generator.
503
+ *
504
+ * All values of offset are valid. The offset position is absolute, not
505
+ * relative to the current position in the sequence.
506
+ *
507
+ * \param generator - Generator to modify
508
+ * \param offset - Absolute offset position
509
+ *
510
+ * \return
511
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
512
+ * - CURAND_STATUS_SUCCESS if generator offset was set successfully \n
513
+ */
514
+ curandStatus_t CURANDAPI
515
+ curandSetGeneratorOffset(curandGenerator_t generator, unsigned long long offset);
516
+
517
+ /**
518
+ * \brief Set the ordering of results of the pseudo or quasirandom number generator.
519
+ *
520
+ * Set the ordering of results of the pseudo or quasirandom number generator.
521
+ *
522
+ * Legal values of \p order for pseudorandom generators are:
523
+ * - CURAND_ORDERING_PSEUDO_DEFAULT
524
+ * - CURAND_ORDERING_PSEUDO_BEST
525
+ * - CURAND_ORDERING_PSEUDO_SEEDED
526
+ * - CURAND_ORDERING_PSEUDO_LEGACY
527
+ *
528
+ * Legal values of \p order for quasirandom generators are:
529
+ * - CURAND_ORDERING_QUASI_DEFAULT
530
+ *
531
+ * \param generator - Generator to modify
532
+ * \param order - Ordering of results
533
+ *
534
+ * \return
535
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
536
+ * - CURAND_STATUS_OUT_OF_RANGE if the ordering is not valid \n
537
+ * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
538
+ */
539
+ curandStatus_t CURANDAPI
540
+ curandSetGeneratorOrdering(curandGenerator_t generator, curandOrdering_t order);
541
+
542
+ /**
543
+ * \brief Set the number of dimensions.
544
+ *
545
+ * Set the number of dimensions to be generated by the quasirandom number
546
+ * generator.
547
+ *
548
+ * Legal values for \p num_dimensions are 1 to 20000.
549
+ *
550
+ * \param generator - Generator to modify
551
+ * \param num_dimensions - Number of dimensions
552
+ *
553
+ * \return
554
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
555
+ * - CURAND_STATUS_OUT_OF_RANGE if num_dimensions is not valid \n
556
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a quasirandom number generator \n
557
+ * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n
558
+ */
559
+ curandStatus_t CURANDAPI
560
+ curandSetQuasiRandomGeneratorDimensions(curandGenerator_t generator, unsigned int num_dimensions);
561
+
562
+ /**
563
+ * \brief Generate 32-bit pseudo or quasirandom numbers.
564
+ *
565
+ * Use \p generator to generate \p num 32-bit results into the device memory at
566
+ * \p outputPtr. The device memory must have been previously allocated and be
567
+ * large enough to hold all the results. Launches are done with the stream
568
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
569
+ *
570
+ * Results are 32-bit values with every bit random.
571
+ *
572
+ * \param generator - Generator to use
573
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
574
+ * Pointer to host memory to store CPU-generated results
575
+ * \param num - Number of random 32-bit values to generate
576
+ *
577
+ * \return
578
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
579
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
580
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
581
+ * a previous kernel launch \n
582
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
583
+ * not a multiple of the quasirandom dimension \n
584
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
585
+ * - CURAND_STATUS_TYPE_ERROR if the generator is a 64 bit quasirandom generator.
586
+ * (use ::curandGenerateLongLong() with 64 bit quasirandom generators)
587
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
588
+ */
589
+ curandStatus_t CURANDAPI
590
+ curandGenerate(curandGenerator_t generator, unsigned int *outputPtr, size_t num);
591
+
592
+ /**
593
+ * \brief Generate 64-bit quasirandom numbers.
594
+ *
595
+ * Use \p generator to generate \p num 64-bit results into the device memory at
596
+ * \p outputPtr. The device memory must have been previously allocated and be
597
+ * large enough to hold all the results. Launches are done with the stream
598
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
599
+ *
600
+ * Results are 64-bit values with every bit random.
601
+ *
602
+ * \param generator - Generator to use
603
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
604
+ * Pointer to host memory to store CPU-generated results
605
+ * \param num - Number of random 64-bit values to generate
606
+ *
607
+ * \return
608
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
609
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
610
+ * a previous kernel launch \n
611
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
612
+ * not a multiple of the quasirandom dimension \n
613
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
614
+ * - CURAND_STATUS_TYPE_ERROR if the generator is not a 64 bit quasirandom generator\n
615
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
616
+ */
617
+ curandStatus_t CURANDAPI
618
+ curandGenerateLongLong(curandGenerator_t generator, unsigned long long *outputPtr, size_t num);
619
+
620
+ /**
621
+ * \brief Generate uniformly distributed floats.
622
+ *
623
+ * Use \p generator to generate \p num float results into the device memory at
624
+ * \p outputPtr. The device memory must have been previously allocated and be
625
+ * large enough to hold all the results. Launches are done with the stream
626
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
627
+ *
628
+ * Results are 32-bit floating point values between \p 0.0f and \p 1.0f,
629
+ * excluding \p 0.0f and including \p 1.0f.
630
+ *
631
+ * \param generator - Generator to use
632
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
633
+ * Pointer to host memory to store CPU-generated results
634
+ * \param num - Number of floats to generate
635
+ *
636
+ * \return
637
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
638
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
639
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
640
+ * a previous kernel launch \n
641
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
642
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
643
+ * not a multiple of the quasirandom dimension \n
644
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
645
+ */
646
+ curandStatus_t CURANDAPI
647
+ curandGenerateUniform(curandGenerator_t generator, float *outputPtr, size_t num);
648
+
649
+ /**
650
+ * \brief Generate uniformly distributed doubles.
651
+ *
652
+ * Use \p generator to generate \p num double results into the device memory at
653
+ * \p outputPtr. The device memory must have been previously allocated and be
654
+ * large enough to hold all the results. Launches are done with the stream
655
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
656
+ *
657
+ * Results are 64-bit double precision floating point values between
658
+ * \p 0.0 and \p 1.0, excluding \p 0.0 and including \p 1.0.
659
+ *
660
+ * \param generator - Generator to use
661
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
662
+ * Pointer to host memory to store CPU-generated results
663
+ * \param num - Number of doubles to generate
664
+ *
665
+ * \return
666
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
667
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
668
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
669
+ * a previous kernel launch \n
670
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
671
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
672
+ * not a multiple of the quasirandom dimension \n
673
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
674
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
675
+ */
676
+ curandStatus_t CURANDAPI
677
+ curandGenerateUniformDouble(curandGenerator_t generator, double *outputPtr, size_t num);
678
+
679
+ /**
680
+ * \brief Generate normally distributed floats.
681
+ *
682
+ * Use \p generator to generate \p n float results into the device memory at
683
+ * \p outputPtr. The device memory must have been previously allocated and be
684
+ * large enough to hold all the results. Launches are done with the stream
685
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
686
+ *
687
+ * Results are 32-bit floating point values with mean \p mean and standard
688
+ * deviation \p stddev.
689
+ *
690
+ * Normally distributed results are generated from pseudorandom generators
691
+ * with a Box-Muller transform, and so require \p n to be even.
692
+ * Quasirandom generators use an inverse cumulative distribution
693
+ * function to preserve dimensionality.
694
+ *
695
+ * There may be slight numerical differences between results generated
696
+ * on the GPU with generators created with ::curandCreateGenerator()
697
+ * and results calculated on the CPU with generators created with
698
+ * ::curandCreateGeneratorHost(). These differences arise because of
699
+ * differences in results for transcendental functions. In addition,
700
+ * future versions of CURAND may use newer versions of the CUDA math
701
+ * library, so different versions of CURAND may give slightly different
702
+ * numerical values.
703
+ *
704
+ * \param generator - Generator to use
705
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
706
+ * Pointer to host memory to store CPU-generated results
707
+ * \param n - Number of floats to generate
708
+ * \param mean - Mean of normal distribution
709
+ * \param stddev - Standard deviation of normal distribution
710
+ *
711
+ * \return
712
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
713
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
714
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
715
+ * a previous kernel launch \n
716
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
717
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
718
+ * not a multiple of the quasirandom dimension, or is not a multiple
719
+ * of two for pseudorandom generators \n
720
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
721
+ */
722
+ curandStatus_t CURANDAPI
723
+ curandGenerateNormal(curandGenerator_t generator, float *outputPtr,
724
+ size_t n, float mean, float stddev);
725
+
726
+ /**
727
+ * \brief Generate normally distributed doubles.
728
+ *
729
+ * Use \p generator to generate \p n double results into the device memory at
730
+ * \p outputPtr. The device memory must have been previously allocated and be
731
+ * large enough to hold all the results. Launches are done with the stream
732
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
733
+ *
734
+ * Results are 64-bit floating point values with mean \p mean and standard
735
+ * deviation \p stddev.
736
+ *
737
+ * Normally distributed results are generated from pseudorandom generators
738
+ * with a Box-Muller transform, and so require \p n to be even.
739
+ * Quasirandom generators use an inverse cumulative distribution
740
+ * function to preserve dimensionality.
741
+ *
742
+ * There may be slight numerical differences between results generated
743
+ * on the GPU with generators created with ::curandCreateGenerator()
744
+ * and results calculated on the CPU with generators created with
745
+ * ::curandCreateGeneratorHost(). These differences arise because of
746
+ * differences in results for transcendental functions. In addition,
747
+ * future versions of CURAND may use newer versions of the CUDA math
748
+ * library, so different versions of CURAND may give slightly different
749
+ * numerical values.
750
+ *
751
+ * \param generator - Generator to use
752
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
753
+ * Pointer to host memory to store CPU-generated results
754
+ * \param n - Number of doubles to generate
755
+ * \param mean - Mean of normal distribution
756
+ * \param stddev - Standard deviation of normal distribution
757
+ *
758
+ * \return
759
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
760
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
761
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
762
+ * a previous kernel launch \n
763
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
764
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
765
+ * not a multiple of the quasirandom dimension, or is not a multiple
766
+ * of two for pseudorandom generators \n
767
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
768
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
769
+ */
770
+ curandStatus_t CURANDAPI
771
+ curandGenerateNormalDouble(curandGenerator_t generator, double *outputPtr,
772
+ size_t n, double mean, double stddev);
773
+
774
+ /**
775
+ * \brief Generate log-normally distributed floats.
776
+ *
777
+ * Use \p generator to generate \p n float results into the device memory at
778
+ * \p outputPtr. The device memory must have been previously allocated and be
779
+ * large enough to hold all the results. Launches are done with the stream
780
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
781
+ *
782
+ * Results are 32-bit floating point values with log-normal distribution based on
783
+ * an associated normal distribution with mean \p mean and standard deviation \p stddev.
784
+ *
785
+ * Normally distributed results are generated from pseudorandom generators
786
+ * with a Box-Muller transform, and so require \p n to be even.
787
+ * Quasirandom generators use an inverse cumulative distribution
788
+ * function to preserve dimensionality.
789
+ * The normally distributed results are transformed into log-normal distribution.
790
+ *
791
+ * There may be slight numerical differences between results generated
792
+ * on the GPU with generators created with ::curandCreateGenerator()
793
+ * and results calculated on the CPU with generators created with
794
+ * ::curandCreateGeneratorHost(). These differences arise because of
795
+ * differences in results for transcendental functions. In addition,
796
+ * future versions of CURAND may use newer versions of the CUDA math
797
+ * library, so different versions of CURAND may give slightly different
798
+ * numerical values.
799
+ *
800
+ * \param generator - Generator to use
801
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
802
+ * Pointer to host memory to store CPU-generated results
803
+ * \param n - Number of floats to generate
804
+ * \param mean - Mean of associated normal distribution
805
+ * \param stddev - Standard deviation of associated normal distribution
806
+ *
807
+ * \return
808
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
809
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
810
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
811
+ * a previous kernel launch \n
812
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
813
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
814
+ * not a multiple of the quasirandom dimension, or is not a multiple
815
+ * of two for pseudorandom generators \n
816
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
817
+ */
818
+ curandStatus_t CURANDAPI
819
+ curandGenerateLogNormal(curandGenerator_t generator, float *outputPtr,
820
+ size_t n, float mean, float stddev);
821
+
822
+ /**
823
+ * \brief Generate log-normally distributed doubles.
824
+ *
825
+ * Use \p generator to generate \p n double results into the device memory at
826
+ * \p outputPtr. The device memory must have been previously allocated and be
827
+ * large enough to hold all the results. Launches are done with the stream
828
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
829
+ *
830
+ * Results are 64-bit floating point values with log-normal distribution based on
831
+ * an associated normal distribution with mean \p mean and standard deviation \p stddev.
832
+ *
833
+ * Normally distributed results are generated from pseudorandom generators
834
+ * with a Box-Muller transform, and so require \p n to be even.
835
+ * Quasirandom generators use an inverse cumulative distribution
836
+ * function to preserve dimensionality.
837
+ * The normally distributed results are transformed into log-normal distribution.
838
+ *
839
+ * There may be slight numerical differences between results generated
840
+ * on the GPU with generators created with ::curandCreateGenerator()
841
+ * and results calculated on the CPU with generators created with
842
+ * ::curandCreateGeneratorHost(). These differences arise because of
843
+ * differences in results for transcendental functions. In addition,
844
+ * future versions of CURAND may use newer versions of the CUDA math
845
+ * library, so different versions of CURAND may give slightly different
846
+ * numerical values.
847
+ *
848
+ * \param generator - Generator to use
849
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
850
+ * Pointer to host memory to store CPU-generated results
851
+ * \param n - Number of doubles to generate
852
+ * \param mean - Mean of normal distribution
853
+ * \param stddev - Standard deviation of normal distribution
854
+ *
855
+ * \return
856
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
857
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
858
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
859
+ * a previous kernel launch \n
860
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
861
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
862
+ * not a multiple of the quasirandom dimension, or is not a multiple
863
+ * of two for pseudorandom generators \n
864
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
865
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
866
+ */
867
+ curandStatus_t CURANDAPI
868
+ curandGenerateLogNormalDouble(curandGenerator_t generator, double *outputPtr,
869
+ size_t n, double mean, double stddev);
870
+
871
+ /**
872
+ * \brief Construct the histogram array for a Poisson distribution.
873
+ *
874
+ * Construct the histogram array for the Poisson distribution with lambda \p lambda.
875
+ * For lambda greater than 2000, an approximation with a normal distribution is used.
876
+ *
877
+ * \param lambda - lambda for the Poisson distribution
878
+ *
879
+ *
880
+ * \param discrete_distribution - pointer to the histogram in device memory
881
+ *
882
+ * \return
883
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
884
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
885
+ * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n
886
+ * - CURAND_STATUS_NOT_INITIALIZED if the distribution pointer was null \n
887
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
888
+ * a previous kernel launch \n
889
+ * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
890
+ * - CURAND_STATUS_SUCCESS if the histogram was generated successfully \n
891
+ */
892
+
893
+ curandStatus_t CURANDAPI
894
+ curandCreatePoissonDistribution(double lambda, curandDiscreteDistribution_t *discrete_distribution);
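A hedged sketch of the create/destroy lifecycle for the declaration above: the handle is built once on the host and later consumed by the device-side curand_discrete() overloads declared in curand_discrete2.h further down; the lambda value is an illustrative assumption.

    curandDiscreteDistribution_t poisson_hist;
    if (curandCreatePoissonDistribution(100.0, &poisson_hist) == CURAND_STATUS_SUCCESS) {
        /* ... pass poisson_hist to a kernel that calls curand_discrete() ... */
        curandDestroyDistribution(poisson_hist);
    }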
895
+
896
+
897
+
898
+ /**
899
+ * \brief Destroy the histogram array for a discrete distribution (e.g. Poisson).
900
+ *
901
+ * Destroy the histogram array for a discrete distribution created by curandCreatePoissonDistribution.
902
+ *
903
+ * \param discrete_distribution - pointer to device memory where the histogram is stored
904
+ *
905
+ * \return
906
+ * - CURAND_STATUS_NOT_INITIALIZED if the histogram was never created \n
907
+ * - CURAND_STATUS_SUCCESS if the histogram was destroyed successfully \n
908
+ */
909
+ curandStatus_t CURANDAPI
910
+ curandDestroyDistribution(curandDiscreteDistribution_t discrete_distribution);
911
+
912
+

913
+ /**
914
+ * \brief Generate Poisson-distributed unsigned ints.
915
+ *
916
+ * Use \p generator to generate \p n unsigned int results into device memory at
917
+ * \p outputPtr. The device memory must have been previously allocated and must be
918
+ * large enough to hold all the results. Launches are done with the stream
919
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
920
+ *
921
+ * Results are 32-bit unsigned integer values with a Poisson distribution with lambda \p lambda.
922
+ *
923
+ * \param generator - Generator to use
924
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
925
+ * Pointer to host memory to store CPU-generated results
926
+ * \param n - Number of unsigned ints to generate
927
+ * \param lambda - lambda for the Poisson distribution
928
+ *
929
+ * \return
930
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
931
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
932
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
933
+ * a previous kernel launch \n
934
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
935
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
936
+ * not a multiple of the quasirandom dimension\n
937
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU or SM does not support double precision \n
938
+ * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n
939
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
940
+ */
941
+
942
+ curandStatus_t CURANDAPI
943
+ curandGeneratePoisson(curandGenerator_t generator, unsigned int *outputPtr,
944
+ size_t n, double lambda);
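A minimal host-API sketch for the declaration above, assuming a Philox generator, an illustrative lambda of 7.5, and a hypothetical device buffer d_counts; error checks omitted.

    curandGenerator_t gen;
    unsigned int *d_counts;
    size_t n = 4096;
    cudaMalloc((void **)&d_counts, n * sizeof(unsigned int));
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
    curandSetPseudoRandomGeneratorSeed(gen, 42ULL);
    curandGeneratePoisson(gen, d_counts, n, 7.5);
    curandDestroyGenerator(gen);
    cudaFree(d_counts);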
945
+ // just for internal usage
946
+ curandStatus_t CURANDAPI
947
+ curandGeneratePoissonMethod(curandGenerator_t generator, unsigned int *outputPtr,
948
+ size_t n, double lambda, curandMethod_t method);
949
+
950
+
951
+ curandStatus_t CURANDAPI
952
+ curandGenerateBinomial(curandGenerator_t generator, unsigned int *outputPtr,
953
+ size_t num, unsigned int n, double p);
954
+ // just for internal usage
955
+ curandStatus_t CURANDAPI
956
+ curandGenerateBinomialMethod(curandGenerator_t generator,
957
+ unsigned int *outputPtr,
958
+ size_t num, unsigned int n, double p,
959
+ curandMethod_t method);
960
+
961
+
962
+ /**
963
+ * \brief Setup starting states.
964
+ *
965
+ * Generate the starting state of the generator. This function is
966
+ * automatically called by generation functions such as
967
+ * ::curandGenerate() and ::curandGenerateUniform().
968
+ * It can be called manually for performance testing reasons to separate
969
+ * timings for starting state generation and random number generation.
970
+ *
971
+ * \param generator - Generator to update
972
+ *
973
+ * \return
974
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
975
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
976
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
977
+ * a previous kernel launch \n
978
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
979
+ * - CURAND_STATUS_SUCCESS if the seeds were generated successfully \n
980
+ */
981
+ curandStatus_t CURANDAPI
982
+ curandGenerateSeeds(curandGenerator_t generator);
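As the comment above notes, calling this explicitly lets state setup be timed apart from generation; a hedged sketch follows, with buffer name and sizes as illustrative assumptions.

    curandGenerator_t gen;
    float *d_vals;
    size_t n = 1 << 18;
    cudaMalloc((void **)&d_vals, n * sizeof(float));
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateSeeds(gen);                  /* time state setup here           */
    curandGenerateUniform(gen, d_vals, n);     /* time generation separately      */
    curandDestroyGenerator(gen);
    cudaFree(d_vals);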
983
+
984
+ /**
985
+ * \brief Get direction vectors for 32-bit quasirandom number generation.
986
+ *
987
+ * Get a pointer to an array of direction vectors that can be used
988
+ * for quasirandom number generation. The resulting pointer will
989
+ * reference an array of direction vectors in host memory.
990
+ *
991
+ * The array contains vectors for many dimensions. Each dimension
992
+ * has 32 vectors. Each individual vector is an unsigned int.
993
+ *
994
+ * Legal values for \p set are:
995
+ * - CURAND_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
996
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
997
+ *
998
+ * \param vectors - Address of pointer in which to return direction vectors
999
+ * \param set - Which set of direction vectors to use
1000
+ *
1001
+ * \return
1002
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
1003
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1004
+ */
1005
+ curandStatus_t CURANDAPI
1006
+ curandGetDirectionVectors32(curandDirectionVectors32_t *vectors[], curandDirectionVectorSet_t set);
1007
+
1008
+ /**
1009
+ * \brief Get scramble constants for 32-bit scrambled Sobol'.
1010
+ *
1011
+ * Get a pointer to an array of scramble constants that can be used
1012
+ * for quasirandom number generation. The resulting pointer will
1013
+ * reference an array of unsigned ints in host memory.
1014
+ *
1015
+ * The array contains constants for many dimensions. Each dimension
1016
+ * has a single unsigned int constant.
1017
+ *
1018
+ * \param constants - Address of pointer in which to return scramble constants
1019
+ *
1020
+ * \return
1021
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1022
+ */
1023
+ curandStatus_t CURANDAPI
1024
+ curandGetScrambleConstants32(unsigned int * * constants);
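A hedged sketch combining the two 32-bit host queries above: the library returns host pointers, which a caller would typically copy per dimension to the device before initializing a (scrambled) Sobol32 state. Variable names are illustrative.

    curandDirectionVectors32_t *h_vectors;     /* set by the library, host memory */
    unsigned int *h_constants;
    curandGetDirectionVectors32(&h_vectors, CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6);
    curandGetScrambleConstants32(&h_constants);
    /* h_vectors[dim] is 32 unsigned ints; h_constants[dim] is one unsigned int   */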
1025
+
1026
+ /**
1027
+ * \brief Get direction vectors for 64-bit quasirandom number generation.
1028
+ *
1029
+ * Get a pointer to an array of direction vectors that can be used
1030
+ * for quasirandom number generation. The resulting pointer will
1031
+ * reference an array of direction vectors in host memory.
1032
+ *
1033
+ * The array contains vectors for many dimensions. Each dimension
1034
+ * has 64 vectors. Each individual vector is an unsigned long long.
1035
+ *
1036
+ * Legal values for \p set are:
1037
+ * - CURAND_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
1038
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
1039
+ *
1040
+ * \param vectors - Address of pointer in which to return direction vectors
1041
+ * \param set - Which set of direction vectors to use
1042
+ *
1043
+ * \return
1044
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
1045
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1046
+ */
1047
+ curandStatus_t CURANDAPI
1048
+ curandGetDirectionVectors64(curandDirectionVectors64_t *vectors[], curandDirectionVectorSet_t set);
1049
+
1050
+ /**
1051
+ * \brief Get scramble constants for 64-bit scrambled Sobol'.
1052
+ *
1053
+ * Get a pointer to an array of scramble constants that can be used
1054
+ * for quasirandom number generation. The resulting pointer will
1055
+ * reference an array of unsigned long longs in host memory.
1056
+ *
1057
+ * The array contains constants for many dimensions. Each dimension
1058
+ * has a single unsigned long long constant.
1059
+ *
1060
+ * \param constants - Address of pointer in which to return scramble constants
1061
+ *
1062
+ * \return
1063
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
1064
+ */
1065
+ curandStatus_t CURANDAPI
1066
+ curandGetScrambleConstants64(unsigned long long * * constants);
1067
+
1068
+ /** @} */
1069
+
1070
+ #endif // __CUDACC_RTC__
1071
+
1072
+ #if defined(__cplusplus)
1073
+ }
1074
+ #endif /* __cplusplus */
1075
+
1076
+
1077
+ #endif /* !defined(CURAND_H_) */
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h ADDED
@@ -0,0 +1,87 @@
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #if !defined(CURANDDISCRETE_H_)
50
+ #define CURANDDISCRETE_H_
51
+
52
+ struct curandDistributionShift_st {
53
+ curandDistribution_t probability;
54
+ curandDistribution_t host_probability;
55
+ unsigned int shift;
56
+ unsigned int length;
57
+ unsigned int host_gen;
58
+ };
59
+
60
+ struct curandHistogramM2_st {
61
+ curandHistogramM2V_t V;
62
+ curandHistogramM2V_t host_V;
63
+ curandHistogramM2K_t K;
64
+ curandHistogramM2K_t host_K;
65
+ unsigned int host_gen;
66
+ };
67
+
68
+
69
+ struct curandDistributionM2Shift_st {
70
+ curandHistogramM2_t histogram;
71
+ curandHistogramM2_t host_histogram;
72
+ unsigned int shift;
73
+ unsigned int length;
74
+ unsigned int host_gen;
75
+ };
76
+
77
+ struct curandDiscreteDistribution_st {
78
+ curandDiscreteDistribution_t self_host_ptr;
79
+ curandDistributionM2Shift_t M2;
80
+ curandDistributionM2Shift_t host_M2;
81
+ double stddev;
82
+ double mean;
83
+ curandMethod_t method;
84
+ unsigned int host_gen;
85
+ };
86
+
87
+ #endif // !defined(CURANDDISCRETE_H_)
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h ADDED
@@ -0,0 +1,253 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_DISCRETE_H_)
52
+ #define CURAND_DISCRETE_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+
69
+ template <typename T>
70
+ QUALIFIERS unsigned int _curand_discrete(T x, curandDiscreteDistribution_t discrete_distribution){
71
+ if (discrete_distribution->method == CURAND_M2){
72
+ return _curand_M2_double(x, discrete_distribution->M2);
73
+ }
74
+ return (unsigned int)((discrete_distribution->stddev * _curand_normal_icdf_double(x)) + discrete_distribution->mean + 0.5);
75
+ }
76
+
77
+
78
+ template <typename STATE>
79
+ QUALIFIERS unsigned int curand__discrete(STATE state, curandDiscreteDistribution_t discrete_distribution){
80
+ if (discrete_distribution->method == CURAND_M2){
81
+ return curand_M2_double(state, discrete_distribution->M2);
82
+ }
83
+ return (unsigned int)((discrete_distribution->stddev * curand_normal_double(state)) + discrete_distribution->mean + 0.5); //Round to nearest
84
+ }
85
+
86
+ template <typename STATE>
87
+ QUALIFIERS uint4 curand__discrete4(STATE state, curandDiscreteDistribution_t discrete_distribution){
88
+ if (discrete_distribution->method == CURAND_M2){
89
+ return curand_M2_double4(state, discrete_distribution->M2);
90
+ }
91
+ double4 _res;
92
+ uint4 result;
93
+ _res = curand_normal4_double(state);
94
+ result.x = (unsigned int)((discrete_distribution->stddev * _res.x) + discrete_distribution->mean + 0.5); //Round to nearest
95
+ result.y = (unsigned int)((discrete_distribution->stddev * _res.y) + discrete_distribution->mean + 0.5); //Round to nearest
96
+ result.z = (unsigned int)((discrete_distribution->stddev * _res.z) + discrete_distribution->mean + 0.5); //Round to nearest
97
+ result.w = (unsigned int)((discrete_distribution->stddev * _res.w) + discrete_distribution->mean + 0.5); //Round to nearest
98
+ return result;
99
+ }
100
+
101
+ /*
102
+ * \brief Return a discrete distributed unsigned int from a XORWOW generator.
103
+ *
104
+ * Return a single discrete distributed unsigned int derived from a
105
+ * distribution defined by \p discrete_distribution from the XORWOW generator in \p state,
106
+ * increment position of generator by one.
107
+ *
108
+ * \param state - Pointer to state to update
109
+ * \param discrete_distribution - ancillary structure for discrete distribution
110
+ *
111
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
112
+ */
113
+ QUALIFIERS unsigned int curand_discrete(curandStateXORWOW_t *state, curandDiscreteDistribution_t discrete_distribution)
114
+ {
115
+ return curand__discrete(state, discrete_distribution);
116
+ }
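A hedged device-side sketch of the overload above: each thread seeds its own XORWOW state and draws one value from a histogram previously built on the host with curandCreatePoissonDistribution(). The kernel name, launch shape, and the assumption that the host-created handle can be passed to and used from device code follow the Poisson device-API usage pattern and are illustrative, not taken from this header.

    __global__ void draw_discrete(curandDiscreteDistribution_t dist,
                                  unsigned int *out, size_t n)
    {
        size_t id = blockIdx.x * blockDim.x + threadIdx.x;
        if (id >= n) return;
        curandStateXORWOW_t state;
        curand_init(1234ULL, id, 0, &state);    /* seed, subsequence, offset */
        out[id] = curand_discrete(&state, dist);
    }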
117
+
118
+ /*
119
+ * \brief Return a discrete distributed unsigned int from a Philox4_32_10 generator.
120
+ *
121
+ * Return a single discrete distributed unsigned int derived from a
122
+ * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
123
+ * increment position of generator by one.
124
+ *
125
+ * \param state - Pointer to state to update
126
+ * \param discrete_distribution - ancillary structure for discrete distribution
127
+ *
128
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
129
+ */
130
+ QUALIFIERS unsigned int curand_discrete(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
131
+ {
132
+ return curand__discrete(state, discrete_distribution);
133
+ }
134
+
135
+ /*
136
+ * \brief Return four discrete distributed unsigned ints from a Philox4_32_10 generator.
137
+ *
138
+ * Return four discrete distributed unsigned ints derived from a
139
+ * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state,
140
+ * increment the position of the generator by four.
141
+ *
142
+ * \param state - Pointer to state to update
143
+ * \param discrete_distribution - ancillary structure for discrete distribution
144
+ *
145
+ * \return uint4 of unsigned ints distributed by distribution defined by \p discrete_distribution.
146
+ */
147
+ QUALIFIERS uint4 curand_discrete4(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution)
148
+ {
149
+ return curand__discrete4(state, discrete_distribution);
150
+ }
151
+ /*
152
+ * \brief Return a discrete distributed unsigned int from a MRG32k3a generator.
153
+ *
154
+ * Return a single discrete distributed unsigned int derived from a
155
+ * distribution defined by \p discrete_distribution from the MRG32k3a generator in \p state,
156
+ * increment position of generator by one.
157
+ *
158
+ * \param state - Pointer to state to update
159
+ * \param discrete_distribution - ancillary structure for discrete distribution
160
+ *
161
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
162
+ */
163
+ QUALIFIERS unsigned int curand_discrete(curandStateMRG32k3a_t *state, curandDiscreteDistribution_t discrete_distribution)
164
+ {
165
+ return curand__discrete(state, discrete_distribution);
166
+ }
167
+
168
+ /*
169
+ * \brief Return a discrete distributed unsigned int from a MTGP32 generator.
170
+ *
171
+ * Return a single discrete distributed unsigned int derived from a
172
+ * distribution defined by \p discrete_distribution from the MTGP32 generator in \p state,
173
+ * increment position of generator by one.
174
+ *
175
+ * \param state - Pointer to state to update
176
+ * \param discrete_distribution - ancillary structure for discrete distribution
177
+ *
178
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
179
+ */
180
+ QUALIFIERS unsigned int curand_discrete(curandStateMtgp32_t *state, curandDiscreteDistribution_t discrete_distribution)
181
+ {
182
+ return curand__discrete(state, discrete_distribution);
183
+ }
184
+
185
+ /*
186
+ * \brief Return a discrete distributed unsigned int from a Sobol32 generator.
187
+ *
188
+ * Return a single discrete distributed unsigned int derived from a
189
+ * distribution defined by \p discrete_distribution from the Sobol32 generator in \p state,
190
+ * increment position of generator by one.
191
+ *
192
+ * \param state - Pointer to state to update
193
+ * \param discrete_distribution - ancillary structure for discrete distribution
194
+ *
195
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
196
+ */
197
+ QUALIFIERS unsigned int curand_discrete(curandStateSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
198
+ {
199
+ return curand__discrete(state, discrete_distribution);
200
+ }
201
+
202
+ /*
203
+ * \brief Return a discrete distributed unsigned int from a scrambled Sobol32 generator.
204
+ *
205
+ * Return a single discrete distributed unsigned int derived from a
206
+ * distribution defined by \p discrete_distribution from the scrambled Sobol32 generator in \p state,
207
+ * increment position of generator by one.
208
+ *
209
+ * \param state - Pointer to state to update
210
+ * \param discrete_distribution - ancillary structure for discrete distribution
211
+ *
212
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
213
+ */
214
+ QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol32_t *state, curandDiscreteDistribution_t discrete_distribution)
215
+ {
216
+ return curand__discrete(state, discrete_distribution);
217
+ }
218
+
219
+ /*
220
+ * \brief Return a discrete distributed unsigned int from a Sobol64 generator.
221
+ *
222
+ * Return a single discrete distributed unsigned int derived from a
223
+ * distribution defined by \p discrete_distribution from the Sobol64 generator in \p state,
224
+ * increment position of generator by one.
225
+ *
226
+ * \param state - Pointer to state to update
227
+ * \param discrete_distribution - ancillary structure for discrete distribution
228
+ *
229
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
230
+ */
231
+ QUALIFIERS unsigned int curand_discrete(curandStateSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
232
+ {
233
+ return curand__discrete(state, discrete_distribution);
234
+ }
235
+
236
+ /*
237
+ * \brief Return a discrete distributed unsigned int from a scrambled Sobol64 generator.
238
+ *
239
+ * Return a single discrete distributed unsigned int derived from a
240
+ * distribution defined by \p discrete_distribution from the scrambled Sobol64 generator in \p state,
241
+ * increment position of generator by one.
242
+ *
243
+ * \param state - Pointer to state to update
244
+ * \param discrete_distribution - ancillary structure for discrete distribution
245
+ *
246
+ * \return unsigned int distributed by distribution defined by \p discrete_distribution.
247
+ */
248
+ QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol64_t *state, curandDiscreteDistribution_t discrete_distribution)
249
+ {
250
+ return curand__discrete(state, discrete_distribution);
251
+ }
252
+
253
+ #endif // !defined(CURAND_DISCRETE_H_)
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_globals.h ADDED
@@ -0,0 +1,93 @@
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+ #ifndef CURAND_GLOBALS_H
49
+ #define CURAND_GLOBALS_H
50
+
51
+ #define MAX_XOR_N (5)
52
+ #define SKIPAHEAD_BLOCKSIZE (4)
53
+ #define SKIPAHEAD_MASK ((1<<SKIPAHEAD_BLOCKSIZE)-1)
54
+ #define CURAND_2POW32 (4294967296.f)
55
+ #define CURAND_2POW32_DOUBLE (4294967296.)
56
+ #define CURAND_2POW32_INV (2.3283064e-10f)
57
+ #define CURAND_2POW32_INV_DOUBLE (2.3283064365386963e-10)
58
+ #define CURAND_2POW53_INV_DOUBLE (1.1102230246251565e-16)
59
+ #define CURAND_2POW32_INV_2PI (2.3283064e-10f * 6.2831855f)
60
+ #define CURAND_2PI (6.2831855f)
61
+ #define CURAND_2POW53_INV_2PI_DOUBLE (1.1102230246251565e-16 * 6.2831853071795860)
62
+ #define CURAND_PI_DOUBLE (3.1415926535897932)
63
+ #define CURAND_2PI_DOUBLE (6.2831853071795860)
64
+ #define CURAND_SQRT2 (-1.4142135f)
65
+ #define CURAND_SQRT2_DOUBLE (-1.4142135623730951)
66
+
67
+ #define SOBOL64_ITR_BINARY_DIVIDE 2
68
+ #define SOBOL_M2_BINARY_DIVIDE 10
69
+ #define MTGP32_M2_BINARY_DIVIDE 32
70
+ #define MAX_LAMBDA 400000
71
+ #define MIN_GAUSS_LAMBDA 2000
72
+
73
+ struct normal_args_st {
74
+ float mean;
75
+ float stddev;
76
+ };
77
+
78
+ typedef struct normal_args_st normal_args_t;
79
+
80
+ struct normal_args_double_st {
81
+ double mean;
82
+ double stddev;
83
+ };
84
+
85
+ typedef struct normal_args_double_st normal_args_double_t;
86
+
87
+
88
+
89
+
90
+
91
+
92
+
93
+ #endif
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h ADDED
@@ -0,0 +1,1677 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_KERNEL_H_)
52
+ #define CURAND_KERNEL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #if !defined(QUALIFIERS)
61
+ #define QUALIFIERS static __forceinline__ __device__
62
+ #endif
63
+
64
+ /* To prevent unused parameter warnings */
65
+ #if !defined(GCC_UNUSED_PARAMETER)
66
+ #if defined(__GNUC__)
67
+ #define GCC_UNUSED_PARAMETER __attribute__((unused))
68
+ #else
69
+ #define GCC_UNUSED_PARAMETER
70
+ #endif /* defined(__GNUC__) */
71
+ #endif /* !defined(GCC_UNUSED_PARAMETER) */
72
+
73
+ #include <nv/target>
74
+
75
+ #ifdef __CUDACC_RTC__
76
+ #define CURAND_DETAIL_USE_CUDA_STL
77
+ #endif
78
+
79
+ #if __cplusplus >= 201103L
80
+ # ifdef CURAND_DETAIL_USE_CUDA_STL
81
+ # define CURAND_STD cuda::std
82
+ # include <cuda/std/type_traits>
83
+ # else
84
+ # define CURAND_STD std
85
+ # include <type_traits>
86
+ # endif // CURAND_DETAIL_USE_CUDA_STL
87
+ #else
88
+ // To support C++03 compilation
89
+ # define CURAND_STD curand_detail
90
+ namespace curand_detail {
91
+ template<bool B, class T = void>
92
+ struct enable_if {};
93
+
94
+ template<class T>
95
+ struct enable_if<true, T> { typedef T type; };
96
+
97
+ template<class T, class U>
98
+ struct is_same { static const bool value = false; };
99
+
100
+ template<class T>
101
+ struct is_same<T, T> { static const bool value = true; };
102
+ } // namespace curand_detail
103
+ #endif // __cplusplus >= 201103L
104
+
105
+ #ifndef __CUDACC_RTC__
106
+ #include <math.h>
107
+ #endif // __CUDACC_RTC__
108
+
109
+ #include "curand.h"
110
+ #include "curand_discrete.h"
111
+ #include "curand_precalc.h"
112
+ #include "curand_mrg32k3a.h"
113
+ #include "curand_mtgp32_kernel.h"
114
+ #include "curand_philox4x32_x.h"
115
+ #include "curand_globals.h"
116
+
117
+ /* Test RNG */
118
+ /* This generator uses the formula:
119
+ x_n = x_(n-1) + 1 mod 2^32
120
+ x_0 = (unsigned int)seed * 3
121
+ Subsequences are spaced 31337 steps apart.
122
+ */
123
+ struct curandStateTest {
124
+ unsigned int v;
125
+ };
126
+
127
+ /** \cond UNHIDE_TYPEDEFS */
128
+ typedef struct curandStateTest curandStateTest_t;
129
+ /** \endcond */
130
+
131
+ /* XORSHIFT FAMILY RNGs */
132
+ /* These generators are a family proposed by Marsaglia. They keep state
133
+ in 32 bit chunks, then use repeated shift and xor operations to scramble
134
+ the bits. The following generators are a combination of a simple Weyl
135
+ generator with an N variable XORSHIFT generator.
136
+ */
137
+
138
+ /* XORSHIFT RNG */
139
+ /* This generator uses the xorwow formula of
140
+ www.jstatsoft.org/v08/i14/paper page 5
141
+ Has period 2^192 - 2^32.
142
+ */
143
+ /**
144
+ * CURAND XORWOW state
145
+ */
146
+ struct curandStateXORWOW;
147
+
148
+ /*
149
+ * Implementation details not in reference documentation */
150
+ struct curandStateXORWOW {
151
+ unsigned int d, v[5];
152
+ int boxmuller_flag;
153
+ int boxmuller_flag_double;
154
+ float boxmuller_extra;
155
+ double boxmuller_extra_double;
156
+ };
157
+
158
+ /*
159
+ * CURAND XORWOW state
160
+ */
161
+ /** \cond UNHIDE_TYPEDEFS */
162
+ typedef struct curandStateXORWOW curandStateXORWOW_t;
163
+
164
+ #define EXTRA_FLAG_NORMAL 0x00000001
165
+ #define EXTRA_FLAG_LOG_NORMAL 0x00000002
166
+ /** \endcond */
167
+
168
+ /* Combined Multiple Recursive Generators */
169
+ /* These generators are a family proposed by L'Ecuyer. They keep state
170
+ in sets of doubles, then use repeated modular arithmetic multiply operations
171
+ to scramble the bits in each set, and combine the result.
172
+ */
173
+
174
+ /* MRG32k3a RNG */
175
+ /* This generator uses the MRG32k3A formula of
176
+ http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf
177
+ Has period 2^191.
178
+ */
179
+
180
+ /* moduli for the recursions */
181
+ /** \cond UNHIDE_DEFINES */
182
+ #define MRG32K3A_MOD1 4294967087.
183
+ #define MRG32K3A_MOD2 4294944443.
184
+
185
+ /* Constants used in generation */
186
+
187
+ #define MRG32K3A_A12 1403580.
188
+ #define MRG32K3A_A13N 810728.
189
+ #define MRG32K3A_A21 527612.
190
+ #define MRG32K3A_A23N 1370589.
191
+ #define MRG32K3A_NORM (2.3283065498378288e-10)
192
+ //
193
+ // #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1))
194
+ // above constant, used verbatim, rounds differently on some host systems.
195
+ #define MRG32K3A_BITS_NORM 1.000000048662
196
+
197
+ /** \endcond */
198
+
199
+
200
+
201
+
202
+ /**
203
+ * CURAND MRG32K3A state
204
+ */
205
+ struct curandStateMRG32k3a;
206
+
207
+ /* Implementation details not in reference documentation */
208
+ struct curandStateMRG32k3a {
209
+ unsigned int s1[3];
210
+ unsigned int s2[3];
211
+ int boxmuller_flag;
212
+ int boxmuller_flag_double;
213
+ float boxmuller_extra;
214
+ double boxmuller_extra_double;
215
+ };
216
+
217
+ /*
218
+ * CURAND MRG32K3A state
219
+ */
220
+ /** \cond UNHIDE_TYPEDEFS */
221
+ typedef struct curandStateMRG32k3a curandStateMRG32k3a_t;
222
+ /** \endcond */
223
+
224
+ /* SOBOL QRNG */
225
+ /**
226
+ * CURAND Sobol32 state
227
+ */
228
+ struct curandStateSobol32;
229
+
230
+ /* Implementation details not in reference documentation */
231
+ struct curandStateSobol32 {
232
+ unsigned int i, x, c;
233
+ unsigned int direction_vectors[32];
234
+ };
235
+
236
+ /*
237
+ * CURAND Sobol32 state
238
+ */
239
+ /** \cond UNHIDE_TYPEDEFS */
240
+ typedef struct curandStateSobol32 curandStateSobol32_t;
241
+ /** \endcond */
242
+
243
+ /**
244
+ * CURAND Scrambled Sobol32 state
245
+ */
246
+ struct curandStateScrambledSobol32;
247
+
248
+ /* Implementation details not in reference documentation */
249
+ struct curandStateScrambledSobol32 {
250
+ unsigned int i, x, c;
251
+ unsigned int direction_vectors[32];
252
+ };
253
+
254
+ /*
255
+ * CURAND Scrambled Sobol32 state
256
+ */
257
+ /** \cond UNHIDE_TYPEDEFS */
258
+ typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t;
259
+ /** \endcond */
260
+
261
+ /**
262
+ * CURAND Sobol64 state
263
+ */
264
+ struct curandStateSobol64;
265
+
266
+ /* Implementation details not in reference documentation */
267
+ struct curandStateSobol64 {
268
+ unsigned long long i, x, c;
269
+ unsigned long long direction_vectors[64];
270
+ };
271
+
272
+ /*
273
+ * CURAND Sobol64 state
274
+ */
275
+ /** \cond UNHIDE_TYPEDEFS */
276
+ typedef struct curandStateSobol64 curandStateSobol64_t;
277
+ /** \endcond */
278
+
279
+ /**
280
+ * CURAND Scrambled Sobol64 state
281
+ */
282
+ struct curandStateScrambledSobol64;
283
+
284
+ /* Implementation details not in reference documentation */
285
+ struct curandStateScrambledSobol64 {
286
+ unsigned long long i, x, c;
287
+ unsigned long long direction_vectors[64];
288
+ };
289
+
290
+ /*
291
+ * CURAND Scrambled Sobol64 state
292
+ */
293
+ /** \cond UNHIDE_TYPEDEFS */
294
+ typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t;
295
+ /** \endcond */
296
+
297
+ /*
298
+ * Default RNG
299
+ */
300
+ /** \cond UNHIDE_TYPEDEFS */
301
+ typedef struct curandStateXORWOW curandState_t;
302
+ typedef struct curandStateXORWOW curandState;
303
+ /** \endcond */
304
+
305
+ /****************************************************************************/
306
+ /* Utility functions needed by RNGs */
307
+ /****************************************************************************/
308
+ /** \cond UNHIDE_UTILITIES */
309
+ /*
310
+ multiply vector by matrix, store in result
311
+ matrix is n x n, measured in 32 bit units
312
+ matrix is stored in row major order
313
+ vector and result cannot be same pointer
314
+ */
315
+ template<int N>
316
+ QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix)
317
+ {
318
+ unsigned int result[N] = { 0 };
319
+ for(int i = 0; i < N; i++) {
320
+ #ifdef __CUDA_ARCH__
321
+ #pragma unroll 16
322
+ #endif
323
+ for(int j = 0; j < 32; j++) {
324
+ if(vector[i] & (1 << j)) {
325
+ for(int k = 0; k < N; k++) {
326
+ result[k] ^= matrix[N * (i * 32 + j) + k];
327
+ }
328
+ }
329
+ }
330
+ }
331
+ for(int i = 0; i < N; i++) {
332
+ vector[i] = result[i];
333
+ }
334
+ }
335
+
336
+ QUALIFIERS void __curand_matvec(unsigned int *vector, unsigned int *matrix,
337
+ unsigned int *result, int n)
338
+ {
339
+ for(int i = 0; i < n; i++) {
340
+ result[i] = 0;
341
+ }
342
+ for(int i = 0; i < n; i++) {
343
+ for(int j = 0; j < 32; j++) {
344
+ if(vector[i] & (1 << j)) {
345
+ for(int k = 0; k < n; k++) {
346
+ result[k] ^= matrix[n * (i * 32 + j) + k];
347
+ }
348
+ }
349
+ }
350
+ }
351
+ }
352
+
353
+ /* generate identity matrix */
354
+ QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n)
355
+ {
356
+ int r;
357
+ for(int i = 0; i < n * 32; i++) {
358
+ for(int j = 0; j < n; j++) {
359
+ r = i & 31;
360
+ if(i / 32 == j) {
361
+ matrix[i * n + j] = (1 << r);
362
+ } else {
363
+ matrix[i * n + j] = 0;
364
+ }
365
+ }
366
+ }
367
+ }
368
+
369
+ /* multiply matrixA by matrixB, store back in matrixA
370
+ matrixA and matrixB must not be same matrix */
371
+ QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n)
372
+ {
373
+ unsigned int result[MAX_XOR_N];
374
+ for(int i = 0; i < n * 32; i++) {
375
+ __curand_matvec(matrixA + i * n, matrixB, result, n);
376
+ for(int j = 0; j < n; j++) {
377
+ matrixA[i * n + j] = result[j];
378
+ }
379
+ }
380
+ }
381
+
382
+ /* copy vectorA to vector */
383
+ QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n)
384
+ {
385
+ for(int i = 0; i < n; i++) {
386
+ vector[i] = vectorA[i];
387
+ }
388
+ }
389
+
390
+ /* copy matrixA to matrix */
391
+ QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n)
392
+ {
393
+ for(int i = 0; i < n * n * 32; i++) {
394
+ matrix[i] = matrixA[i];
395
+ }
396
+ }
397
+
398
+ /* compute matrixA to power p, store result in matrix */
399
+ QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA,
400
+ unsigned long long p, int n)
401
+ {
402
+ unsigned int matrixR[MAX_XOR_N * MAX_XOR_N * 32];
403
+ unsigned int matrixS[MAX_XOR_N * MAX_XOR_N * 32];
404
+ __curand_matidentity(matrix, n);
405
+ __curand_matcopy(matrixR, matrixA, n);
406
+ while(p) {
407
+ if(p & 1) {
408
+ __curand_matmat(matrix, matrixR, n);
409
+ }
410
+ __curand_matcopy(matrixS, matrixR, n);
411
+ __curand_matmat(matrixR, matrixS, n);
412
+ p >>= 1;
413
+ }
414
+ }
415
+
416
+ /****************************************************************************/
417
+ /* Utility functions needed by MRG32k3a RNG */
418
+ /* Matrix operations modulo some integer less than 2**32, done in */
419
+ /* double precision floating point, with care not to overflow 53 bits */
420
+ /****************************************************************************/
421
+
422
+ /* return i mod m. */
423
+ /* assumes i and m are integers represented accurately in doubles */
424
+
425
+ QUALIFIERS double curand_MRGmod(double i, double m)
426
+ {
427
+ double quo;
428
+ double rem;
429
+ quo = floor(i/m);
430
+ rem = i - (quo*m);
431
+ if (rem < 0.0) rem += m;
432
+ return rem;
433
+ }
434
+
435
+ /* Multiplication modulo m. Inputs i and j less than 2**32 */
436
+ /* Ensure intermediate results do not exceed 2**53 */
437
+
438
+ QUALIFIERS double curand_MRGmodMul(double i, double j, double m)
439
+ {
440
+ double tempHi;
441
+ double tempLo;
442
+
443
+ tempHi = floor(i/131072.0);
444
+ tempLo = i - (tempHi*131072.0);
445
+ tempLo = curand_MRGmod( curand_MRGmod( (tempHi * j), m) * 131072.0 + curand_MRGmod(tempLo * j, m),m);
446
+
447
+ if (tempLo < 0.0) tempLo += m;
448
+ return tempLo;
449
+ }
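For reference, and as an editorial note rather than text from the header: the split constant 131072 is 2^17. With i and j below 2^32, the partial products floor(i/2^17) * j < 2^47 and (i mod 2^17) * j < 2^49, and the recombination term curand_MRGmod(tempHi * j, m) * 131072 stays below 2^49, so every intermediate remains under the 2^53 limit of exactly representable integers in a double. That is the bound the "do not exceed 2**53" comment above is guaranteeing.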
450
+
451
+ /* multiply 3 by 3 matrices of doubles, modulo m */
452
+
453
+ QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3],unsigned int i2[][3],unsigned int o[][3],double m)
454
+ {
455
+ int i,j;
456
+ double temp[3][3];
457
+ for (i=0; i<3; i++){
458
+ for (j=0; j<3; j++){
459
+ temp[i][j] = ( curand_MRGmodMul(i1[i][0], i2[0][j], m) +
460
+ curand_MRGmodMul(i1[i][1], i2[1][j], m) +
461
+ curand_MRGmodMul(i1[i][2], i2[2][j], m));
462
+ temp[i][j] = curand_MRGmod( temp[i][j], m );
463
+ }
464
+ }
465
+ for (i=0; i<3; i++){
466
+ for (j=0; j<3; j++){
467
+ o[i][j] = (unsigned int)temp[i][j];
468
+ }
469
+ }
470
+ }
471
+
472
+ /* multiply 3 by 3 matrix times 3 by 1 vector of doubles, modulo m */
473
+
474
+ QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m)
475
+ {
476
+ int k;
477
+ double t[3];
478
+ for (k = 0; k < 3; k++) {
479
+ t[k] = ( curand_MRGmodMul(i[k][0], v[0], m) +
480
+ curand_MRGmodMul(i[k][1], v[1], m) +
481
+ curand_MRGmodMul(i[k][2], v[2], m) );
482
+ t[k] = curand_MRGmod( t[k], m );
483
+ }
484
+ for (k = 0; k < 3; k++) {
485
+ v[k] = (unsigned int)t[k];
486
+ }
487
+
488
+ }
489
+
490
+ /* raise a 3 by 3 matrix of doubles to a 64 bit integer power pow, modulo m */
491
+ /* input is index zero of an array of 3 by 3 matrices m, */
492
+ /* each m = m[0]**(2**index) */
493
+
494
+ QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow )
495
+ {
496
+ int i,j;
497
+ for ( i = 0; i < 3; i++ ) {
498
+ for ( j = 0; j < 3; j++ ) {
499
+ o[i][j] = 0;
500
+ if ( i == j ) o[i][j] = 1;
501
+ }
502
+ }
503
+ i = 0;
504
+ curand_MRGmatVecMul3x3(o,o[0],m);
505
+ while (pow) {
506
+ if ( pow & 1ll ) {
507
+ curand_MRGmatMul3x3(in[i], o, o, m);
508
+ }
509
+ i++;
510
+ pow >>= 1;
511
+ }
512
+ }
513
+
514
+ /* raise a 3 by 3 matrix of doubles to the power */
515
+ /* 2 to the power (pow modulo 191), modulo m */
516
+
517
+ QUALIFIERS void curnand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow )
518
+ {
519
+ unsigned int temp[3][3];
520
+ int i,j;
521
+ pow = pow % 191;
522
+ for ( i = 0; i < 3; i++ ) {
523
+ for ( j = 0; j < 3; j++ ) {
524
+ temp[i][j] = (unsigned int)in[i][j];
525
+ }
526
+ }
527
+ while (pow) {
528
+ curand_MRGmatMul3x3(temp, temp, temp, m);
529
+ pow--;
530
+ }
531
+ for ( i = 0; i < 3; i++ ) {
532
+ for ( j = 0; j < 3; j++ ) {
533
+ o[i][j] = temp[i][j];
534
+ }
535
+ }
536
+ }
537
+
538
+ /** \endcond */
539
+
540
+ /****************************************************************************/
541
+ /* Kernel implementations of RNGs */
542
+ /****************************************************************************/
543
+
544
+ /* Test RNG */
545
+
546
+ QUALIFIERS void curand_init(unsigned long long seed,
547
+ unsigned long long subsequence,
548
+ unsigned long long offset,
549
+ curandStateTest_t *state)
550
+ {
551
+ state->v = (unsigned int)(seed * 3) + (unsigned int)(subsequence * 31337) + \
552
+ (unsigned int)offset;
553
+ }
554
+
555
+
556
+ QUALIFIERS unsigned int curand(curandStateTest_t *state)
557
+ {
558
+ unsigned int r = state->v++;
559
+ return r;
560
+ }
561
+
562
+ QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state)
563
+ {
564
+ state->v += (unsigned int)n;
565
+ }
566
+
567
+ /* XORWOW RNG */
568
+
569
+ template <typename T, int n>
570
+ QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[])
571
+ {
572
+ T state;
573
+ // Generate matrix that advances one step
574
+ // matrix has n * n * 32 32-bit elements
575
+ // solve for matrix by stepping single bit states
576
+ for(int i = 0; i < 32 * n; i++) {
577
+ state.d = 0;
578
+ for(int j = 0; j < n; j++) {
579
+ state.v[j] = 0;
580
+ }
581
+ state.v[i / 32] = (1 << (i & 31));
582
+ curand(&state);
583
+ for(int j = 0; j < n; j++) {
584
+ matrix[i * n + j] = state.v[j];
585
+ }
586
+ }
587
+ }
588
+
589
+ template <typename T, int n>
590
+ QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch)
591
+ {
592
+ // unsigned int matrix[n * n * 32];
593
+ unsigned int *matrix = scratch;
594
+ // unsigned int matrixA[n * n * 32];
595
+ unsigned int *matrixA = scratch + (n * n * 32);
596
+ // unsigned int vector[n];
597
+ unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
598
+ // unsigned int result[n];
599
+ unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
600
+ unsigned long long p = x;
601
+ for(int i = 0; i < n; i++) {
602
+ vector[i] = state->v[i];
603
+ }
604
+ int matrix_num = 0;
605
+ while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) {
606
+ for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
607
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
608
+ __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n);
609
+ ,
610
+ __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n);
611
+ )
612
+ __curand_veccopy(vector, result, n);
613
+ }
614
+ p >>= PRECALC_BLOCK_SIZE;
615
+ matrix_num++;
616
+ }
617
+ if(p) {
618
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
619
+ __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
620
+ __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n);
621
+ ,
622
+ __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
623
+ __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n);
624
+ )
625
+ }
626
+ while(p) {
627
+ for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
628
+ __curand_matvec(vector, matrixA, result, n);
629
+ __curand_veccopy(vector, result, n);
630
+ }
631
+ p >>= SKIPAHEAD_BLOCKSIZE;
632
+ if(p) {
633
+ for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
634
+ __curand_matmat(matrix, matrixA, n);
635
+ __curand_matcopy(matrixA, matrix, n);
636
+ }
637
+ }
638
+ }
639
+ for(int i = 0; i < n; i++) {
640
+ state->v[i] = vector[i];
641
+ }
642
+ state->d += 362437 * (unsigned int)x;
643
+ }
644
+
645
+ template <typename T, int n>
646
+ QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch)
647
+ {
648
+ // unsigned int matrix[n * n * 32];
649
+ unsigned int *matrix = scratch;
650
+ // unsigned int matrixA[n * n * 32];
651
+ unsigned int *matrixA = scratch + (n * n * 32);
652
+ // unsigned int vector[n];
653
+ unsigned int *vector = scratch + (n * n * 32) + (n * n * 32);
654
+ // unsigned int result[n];
655
+ unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n;
656
+ unsigned long long p = x;
657
+ for(int i = 0; i < n; i++) {
658
+ vector[i] = state->v[i];
659
+ }
660
+ int matrix_num = 0;
661
+ while(p && matrix_num < PRECALC_NUM_MATRICES - 1) {
662
+ for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
663
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
664
+ __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n);
665
+ ,
666
+ __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n);
667
+ )
668
+ __curand_veccopy(vector, result, n);
669
+ }
670
+ p >>= PRECALC_BLOCK_SIZE;
671
+ matrix_num++;
672
+ }
673
+ if(p) {
674
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
675
+ __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
676
+ __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n);
677
+ ,
678
+ __curand_matcopy(matrix, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
679
+ __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n);
680
+ )
681
+ }
682
+ while(p) {
683
+ for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) {
684
+ __curand_matvec(vector, matrixA, result, n);
685
+ __curand_veccopy(vector, result, n);
686
+ }
687
+ p >>= SKIPAHEAD_BLOCKSIZE;
688
+ if(p) {
689
+ for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) {
690
+ __curand_matmat(matrix, matrixA, n);
691
+ __curand_matcopy(matrixA, matrix, n);
692
+ }
693
+ }
694
+ }
695
+ for(int i = 0; i < n; i++) {
696
+ state->v[i] = vector[i];
697
+ }
698
+ /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
699
+ }
700
+
701
+ template <typename T, int N>
702
+ QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state)
703
+ {
704
+ unsigned long long p = x;
705
+ int matrix_num = 0;
706
+ while(p) {
707
+ for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) {
708
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
709
+ __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix[matrix_num]);
710
+ ,
711
+ __curand_matvec_inplace<N>(state->v, precalc_xorwow_offset_matrix_host[matrix_num]);
712
+ )
713
+ }
714
+ p >>= PRECALC_BLOCK_SIZE;
715
+ matrix_num++;
716
+ }
717
+ state->d += 362437 * (unsigned int)x;
718
+ }
719
+
720
+ template <typename T, int N>
721
+ QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state)
722
+ {
723
+ int matrix_num = 0;
724
+ while(x) {
725
+ for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) {
726
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
727
+ __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix[matrix_num]);
728
+ ,
729
+ __curand_matvec_inplace<N>(state->v, precalc_xorwow_matrix_host[matrix_num]);
730
+ )
731
+ }
732
+ x >>= PRECALC_BLOCK_SIZE;
733
+ matrix_num++;
734
+ }
735
+ /* No update of state->d needed, guaranteed to be a multiple of 2^32 */
736
+ }
737
+
738
+ /**
739
+ * \brief Update XORWOW state to skip \p n elements.
740
+ *
741
+ * Update the XORWOW state in \p state to skip ahead \p n elements.
742
+ *
743
+ * All values of \p n are valid. Large values require more computation and so
744
+ * will take more time to complete.
745
+ *
746
+ * \param n - Number of elements to skip
747
+ * \param state - Pointer to state to update
748
+ */
749
+ QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state)
750
+ {
751
+ _skipahead_inplace<curandStateXORWOW_t, 5>(n, state);
752
+ }
753
+
754
+ /**
755
+ * \brief Update XORWOW state to skip ahead \p n subsequences.
756
+ *
757
+ * Update the XORWOW state in \p state to skip ahead \p n subsequences. Each
758
+ * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
759
+ * \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
760
+ *
761
+ * All values of \p n are valid. Large values require more computation and so
762
+ * will take more time to complete.
763
+ *
764
+ * \param n - Number of subsequences to skip
765
+ * \param state - Pointer to state to update
766
+ */
767
+ QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state)
768
+ {
769
+ _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(n, state);
770
+ }
771
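// Editor's illustrative sketch (not part of this header): one way to use the skipahead()
// documented above is to give every thread the same seed and sequence, then offset each
// thread by a fixed stride. Kernel and buffer names here are hypothetical.
#include <curand_kernel.h>

__global__ void xorwow_strided_draw(unsigned int *out, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateXORWOW_t s;
    curand_init(seed, 0, 0, &s);                    // same sequence for every thread
    skipahead((unsigned long long)tid * 4096, &s);  // non-overlapping 4096-element blocks
    out[tid] = curand(&s);
}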
+
772
+ QUALIFIERS void _curand_init_scratch(unsigned long long seed,
773
+ unsigned long long subsequence,
774
+ unsigned long long offset,
775
+ curandStateXORWOW_t *state,
776
+ unsigned int *scratch)
777
+ {
778
+ // Break up seed, apply salt
779
+ // Constants are arbitrary nonzero values
780
+ unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
781
+ unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
782
+ // Simple multiplication to mix up bits
783
+ // Constants are arbitrary odd values
784
+ unsigned int t0 = 1099087573UL * s0;
785
+ unsigned int t1 = 2591861531UL * s1;
786
+ state->d = 6615241 + t1 + t0;
787
+ state->v[0] = 123456789UL + t0;
788
+ state->v[1] = 362436069UL ^ t0;
789
+ state->v[2] = 521288629UL + t1;
790
+ state->v[3] = 88675123UL ^ t1;
791
+ state->v[4] = 5783321UL + t0;
792
+ _skipahead_sequence_scratch<curandStateXORWOW_t, 5>(subsequence, state, scratch);
793
+ _skipahead_scratch<curandStateXORWOW_t, 5>(offset, state, scratch);
794
+ state->boxmuller_flag = 0;
795
+ state->boxmuller_flag_double = 0;
796
+ state->boxmuller_extra = 0.f;
797
+ state->boxmuller_extra_double = 0.;
798
+ }
799
+
800
+ QUALIFIERS void _curand_init_inplace(unsigned long long seed,
801
+ unsigned long long subsequence,
802
+ unsigned long long offset,
803
+ curandStateXORWOW_t *state)
804
+ {
805
+ // Break up seed, apply salt
806
+ // Constants are arbitrary nonzero values
807
+ unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL;
808
+ unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL;
809
+ // Simple multiplication to mix up bits
810
+ // Constants are arbitrary odd values
811
+ unsigned int t0 = 1099087573UL * s0;
812
+ unsigned int t1 = 2591861531UL * s1;
813
+ state->d = 6615241 + t1 + t0;
814
+ state->v[0] = 123456789UL + t0;
815
+ state->v[1] = 362436069UL ^ t0;
816
+ state->v[2] = 521288629UL + t1;
817
+ state->v[3] = 88675123UL ^ t1;
818
+ state->v[4] = 5783321UL + t0;
819
+ _skipahead_sequence_inplace<curandStateXORWOW_t, 5>(subsequence, state);
820
+ _skipahead_inplace<curandStateXORWOW_t, 5>(offset, state);
821
+ state->boxmuller_flag = 0;
822
+ state->boxmuller_flag_double = 0;
823
+ state->boxmuller_extra = 0.f;
824
+ state->boxmuller_extra_double = 0.;
825
+ }
826
+
827
+ /**
828
+ * \brief Initialize XORWOW state.
829
+ *
830
+ * Initialize XORWOW state in \p state with the given \p seed, \p subsequence,
831
+ * and \p offset.
832
+ *
833
+ * All input values of \p seed, \p subsequence, and \p offset are legal. Large
834
+ * values for \p subsequence and \p offset require more computation and so will
835
+ * take more time to complete.
836
+ *
837
+ * A value of 0 for \p seed sets the state to the values of the original
838
+ * published version of the \p xorwow algorithm.
839
+ *
840
+ * \param seed - Arbitrary bits to use as a seed
841
+ * \param subsequence - Subsequence to start at
842
+ * \param offset - Absolute offset into sequence
843
+ * \param state - Pointer to state to initialize
844
+ */
845
+ QUALIFIERS void curand_init(unsigned long long seed,
846
+ unsigned long long subsequence,
847
+ unsigned long long offset,
848
+ curandStateXORWOW_t *state)
849
+ {
850
+ _curand_init_inplace(seed, subsequence, offset, state);
851
+ }
852
+
853
+ /**
854
+ * \brief Return 32-bits of pseudorandomness from an XORWOW generator.
855
+ *
856
+ * Return 32-bits of pseudorandomness from the XORWOW generator in \p state,
857
+ * increment position of generator by one.
858
+ *
859
+ * \param state - Pointer to state to update
860
+ *
861
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
862
+ */
863
+ QUALIFIERS unsigned int curand(curandStateXORWOW_t *state)
864
+ {
865
+ unsigned int t;
866
+ t = (state->v[0] ^ (state->v[0] >> 2));
867
+ state->v[0] = state->v[1];
868
+ state->v[1] = state->v[2];
869
+ state->v[2] = state->v[3];
870
+ state->v[3] = state->v[4];
871
+ state->v[4] = (state->v[4] ^ (state->v[4] <<4)) ^ (t ^ (t << 1));
872
+ state->d += 362437;
873
+ return state->v[4] + state->d;
874
+ }
875
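// Editor's sketch, not part of the header: the typical per-thread pattern for the XORWOW
// generator documented above -- a common seed, one subsequence per thread, then 32-bit
// draws. Names are illustrative only.
#include <curand_kernel.h>

__global__ void xorwow_draw(unsigned int *out, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateXORWOW_t s;
    curand_init(seed, /*subsequence=*/tid, /*offset=*/0, &s);
    out[tid] = curand(&s);   // each call advances the state by one element
}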
+
876
+
877
+ /**
878
+ * \brief Return 32-bits of pseudorandomness from a Philox4_32_10 generator.

879
+ *
880
+ * Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state,
881
+ * increment position of generator by one.
882
+ *
883
+ * \param state - Pointer to state to update
884
+ *
885
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
886
+ */
887
+
888
+ QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
889
+ {
890
+ // Maintain the invariant: output[STATE] is always "good" and
891
+ // is the next value to be returned by curand.
892
+ unsigned int ret;
893
+ switch(state->STATE++){
894
+ default:
895
+ ret = state->output.x;
896
+ break;
897
+ case 1:
898
+ ret = state->output.y;
899
+ break;
900
+ case 2:
901
+ ret = state->output.z;
902
+ break;
903
+ case 3:
904
+ ret = state->output.w;
905
+ break;
906
+ }
907
+ if(state->STATE == 4){
908
+ Philox_State_Incr(state);
909
+ state->output = curand_Philox4x32_10(state->ctr,state->key);
910
+ state->STATE = 0;
911
+ }
912
+ return ret;
913
+ }
914
+
915
+ /**
916
+ * \brief Return tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator.
917
+ *
918
+ * Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state,
919
+ * increment position of generator by four.
920
+ *
921
+ * \param state - Pointer to state to update
922
+ *
923
+ * \return 128-bits of pseudorandomness as a uint4, all bits valid to use.
924
+ */
925
+
926
+ QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state)
927
+ {
928
+ uint4 r;
929
+
930
+ uint4 tmp = state->output;
931
+ Philox_State_Incr(state);
932
+ state->output= curand_Philox4x32_10(state->ctr,state->key);
933
+ switch(state->STATE){
934
+ case 0:
935
+ return tmp;
936
+ case 1:
937
+ r.x = tmp.y;
938
+ r.y = tmp.z;
939
+ r.z = tmp.w;
940
+ r.w = state->output.x;
941
+ break;
942
+ case 2:
943
+ r.x = tmp.z;
944
+ r.y = tmp.w;
945
+ r.z = state->output.x;
946
+ r.w = state->output.y;
947
+ break;
948
+ case 3:
949
+ r.x = tmp.w;
950
+ r.y = state->output.x;
951
+ r.z = state->output.y;
952
+ r.w = state->output.z;
953
+ break;
954
+ default:
955
+ // NOT possible but needed to avoid compiler warnings
956
+ return tmp;
957
+ }
958
+ return r;
959
+ }
960
+
961
+ /**
962
+ * \brief Update Philox4_32_10 state to skip \p n elements.
963
+ *
964
+ * Update the Philox4_32_10 state in \p state to skip ahead \p n elements.
965
+ *
966
+ * All values of \p n are valid.
967
+ *
968
+ * \param n - Number of elements to skip
969
+ * \param state - Pointer to state to update
970
+ */
971
+ QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state)
972
+ {
973
+ state->STATE += (n & 3);
974
+ n /= 4;
975
+ if( state->STATE > 3 ){
976
+ n += 1;
977
+ state->STATE -= 4;
978
+ }
979
+ Philox_State_Incr(state, n);
980
+ state->output = curand_Philox4x32_10(state->ctr,state->key);
981
+ }
982
+
983
+ /**
984
+ * \brief Update Philox4_32_10 state to skip ahead \p n subsequences.
985
+ *
986
+ * Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each
987
+ * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
988
+ * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly * n elements.
989
+ *
990
+ * All values of \p n are valid.
991
+ *
992
+ * \param n - Number of subsequences to skip
993
+ * \param state - Pointer to state to update
994
+ */
995
+ QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state)
996
+ {
997
+ Philox_State_Incr_hi(state, n);
998
+ state->output = curand_Philox4x32_10(state->ctr,state->key);
999
+ }
1000
+
1001
+ /**
1002
+ * \brief Initialize Philox4_32_10 state.
1003
+ *
1004
+ * Initialize Philox4_32_10 state in \p state with the given \p seed, \p subsequence,
1005
+ * and \p offset.
1006
+ *
1007
+ * All input values for \p seed, \p subsequence and \p offset are legal. Each of the
1008
+ * \xmlonly<ph outputclass="xmlonly">2<sup>64</sup></ph>\endxmlonly possible
1009
+ * values of seed selects an independent sequence of length
1010
+ * \xmlonly<ph outputclass="xmlonly">2<sup>130</sup></ph>\endxmlonly.
1011
+ * The first
1012
+ * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup> * subsequence + offset</ph>\endxmlonly
1013
+ * values of the sequence are skipped.
1014
+ * I.e., subsequences are of length
1015
+ * \xmlonly<ph outputclass="xmlonly">2<sup>66</sup></ph>\endxmlonly.
1016
+ *
1017
+ * \param seed - Arbitrary bits to use as a seed
1018
+ * \param subsequence - Subsequence to start at
1019
+ * \param offset - Absolute offset into subsequence
1020
+ * \param state - Pointer to state to initialize
1021
+ */
1022
+ QUALIFIERS void curand_init(unsigned long long seed,
1023
+ unsigned long long subsequence,
1024
+ unsigned long long offset,
1025
+ curandStatePhilox4_32_10_t *state)
1026
+ {
1027
+ state->ctr = make_uint4(0, 0, 0, 0);
1028
+ state->key.x = (unsigned int)seed;
1029
+ state->key.y = (unsigned int)(seed>>32);
1030
+ state->STATE = 0;
1031
+ state->boxmuller_flag = 0;
1032
+ state->boxmuller_flag_double = 0;
1033
+ state->boxmuller_extra = 0.f;
1034
+ state->boxmuller_extra_double = 0.;
1035
+ skipahead_sequence(subsequence, state);
1036
+ skipahead(offset, state);
1037
+ }
1038
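// Editor's sketch, not part of the header: Philox4_32_10 is counter based, so the
// curand4() documented above can hand back four 32-bit values per call. Kernel and
// buffer names are placeholders.
#include <curand_kernel.h>

__global__ void philox_draw4(uint4 *out, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStatePhilox4_32_10_t s;
    curand_init(seed, /*subsequence=*/tid, /*offset=*/0, &s);
    out[tid] = curand4(&s);  // four 32-bit draws, position advances by four
}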
+
1039
+
1040
+ /* MRG32k3a RNG */
1041
+
1042
+ /* Base generator for MRG32k3a */
1043
+ QUALIFIERS unsigned long long __curand_umad(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b, GCC_UNUSED_PARAMETER unsigned long long c)
1044
+ {
1045
+ unsigned long long r = 0;
1046
+ NV_IF_TARGET(NV_PROVIDES_SM_61,
1047
+ asm("mad.wide.u32 %0, %1, %2, %3;"
1048
+ : "=l"(r) : "r"(a), "r"(b), "l"(c));
1049
+ )
1050
+ return r;
1051
+ }
1052
+ QUALIFIERS unsigned long long __curand_umul(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b)
1053
+ {
1054
+ unsigned long long r = 0;
1055
+ NV_IF_TARGET(NV_PROVIDES_SM_61,
1056
+ asm("mul.wide.u32 %0, %1, %2;"
1057
+ : "=l"(r) : "r"(a), "r"(b));
1058
+ )
1059
+ return r;
1060
+ }
1061
+ QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state)
1062
+ {
1063
+ NV_IF_TARGET(NV_PROVIDES_SM_61,
1064
+ const unsigned int m1 = 4294967087u;
1065
+ const unsigned int m2 = 4294944443u;
1066
+ const unsigned int m1c = 209u;
1067
+ const unsigned int m2c = 22853u;
1068
+ const unsigned int a12 = 1403580u;
1069
+ const unsigned int a13n = 810728u;
1070
+ const unsigned int a21 = 527612u;
1071
+ const unsigned int a23n = 1370589u;
1072
+
1073
+ unsigned long long p1;
1074
+ unsigned long long p2;
1075
+ const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]);
1076
+ p1 = __curand_umad(a12, state->s1[1], p3);
1077
+
1078
+ // Putting addition inside and changing umul to umad
1079
+ // slowed this function down on GV100
1080
+ p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff);
1081
+ if (p1 >= m1) p1 -= m1;
1082
+
1083
+ state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1;
1084
+ const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]);
1085
+ p2 = __curand_umad(a21, state->s2[2], p4);
1086
+
1087
+ // Putting addition inside and changing umul to umad
1088
+ // slowed this function down on GV100
1089
+ p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
1090
+ p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff);
1091
+ if (p2 >= m2) p2 -= m2;
1092
+
1093
+ state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2;
1094
+
1095
+ const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2;
1096
+ if(p1 <= p2) return p5 + m1;
1097
+ return p5;
1098
+ )
1099
+ NV_IF_TARGET(NV_IS_DEVICE,
1100
+ /* nj's implementation */
1101
+ const double m1 = 4294967087.;
1102
+ const double m2 = 4294944443.;
1103
+ const double a12 = 1403580.;
1104
+ const double a13n = 810728.;
1105
+ const double a21 = 527612.;
1106
+ const double a23n = 1370589.;
1107
+
1108
+ const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */
1109
+ const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */
1110
+ const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */
1111
+ const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */
1112
+
1113
+ double q;
1114
+ double p1;
1115
+ double p2;
1116
+ p1 = a12 * state->s1[1] - a13n * state->s1[0];
1117
+ q = trunc (fma (p1, rh1, p1 * rl1));
1118
+ p1 -= q * m1;
1119
+ if (p1 < 0.0) p1 += m1;
1120
+ state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1;
1121
+ p2 = a21 * state->s2[2] - a23n * state->s2[0];
1122
+ q = trunc (fma (p2, rh2, p2 * rl2));
1123
+ p2 -= q * m2;
1124
+ if (p2 < 0.0) p2 += m2;
1125
+ state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2;
1126
+ if (p1 <= p2) return (p1 - p2 + m1);
1127
+ else return (p1 - p2);
1128
+ )
1129
+ /* end nj's implementation */
1130
+ double p1;
1131
+ double p2;
1132
+ double r;
1133
+ p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]);
1134
+ p1 = curand_MRGmod(p1, MRG32K3A_MOD1);
1135
+ if (p1 < 0.0) p1 += MRG32K3A_MOD1;
1136
+ state->s1[0] = state->s1[1];
1137
+ state->s1[1] = state->s1[2];
1138
+ state->s1[2] = (unsigned int)p1;
1139
+ p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]);
1140
+ p2 = curand_MRGmod(p2, MRG32K3A_MOD2);
1141
+ if (p2 < 0) p2 += MRG32K3A_MOD2;
1142
+ state->s2[0] = state->s2[1];
1143
+ state->s2[1] = state->s2[2];
1144
+ state->s2[2] = (unsigned int)p2;
1145
+ r = p1 - p2;
1146
+ if (r <= 0) r += MRG32K3A_MOD1;
1147
+ return r;
1148
+ }
1149
+
1150
+
1151
+ /**
1152
+ * \brief Return 32-bits of pseudorandomness from an MRG32k3a generator.
1153
+ *
1154
+ * Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state,
1155
+ * increment position of generator by one.
1156
+ *
1157
+ * \param state - Pointer to state to update
1158
+ *
1159
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
1160
+ */
1161
+ QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state)
1162
+ {
1163
+ double dRet;
1164
+ dRet = (double)curand_MRG32k3a(state)*(double)MRG32K3A_BITS_NORM;
1165
+ return (unsigned int)dRet;
1166
+ }
1167
+
1168
+
1169
+
1170
+ /**
1171
+ * \brief Update MRG32k3a state to skip \p n elements.
1172
+ *
1173
+ * Update the MRG32k3a state in \p state to skip ahead \p n elements.
1174
+ *
1175
+ * All values of \p n are valid. Large values require more computation and so
1176
+ * will take more time to complete.
1177
+ *
1178
+ * \param n - Number of elements to skip
1179
+ * \param state - Pointer to state to update
1180
+ */
1181
+ QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state)
1182
+ {
1183
+ unsigned int t[3][3];
1184
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
1185
+ curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n);
1186
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1187
+ curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n);
1188
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1189
+ ,
1190
+ curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n);
1191
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1192
+ curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n);
1193
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1194
+ )
1195
+ }
1196
+
1197
+ /**
1198
+ * \brief Update MRG32k3a state to skip ahead \p n subsequences.
1199
+ *
1200
+ * Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each
1201
+ * subsequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly
1202
+ *
1203
+ * \xmlonly<ph outputclass="xmlonly">2<sup>76</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
1204
+ * \xmlonly<ph outputclass="xmlonly">2<sup>67</sup></ph>\endxmlonly * n elements.
1205
+ *
1206
+ * Valid values of \p n are 0 to \xmlonly<ph outputclass="xmlonly">2<sup>51</sup></ph>\endxmlonly. Note \p n will be masked to 51 bits
1207
+ *
1208
+ * \param n - Number of subsequences to skip
1209
+ * \param state - Pointer to state to update
1210
+ */
1211
+ QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state)
1212
+ {
1213
+ unsigned int t[3][3];
1214
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
1215
+ curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n);
1216
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1217
+ curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n);
1218
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1219
+ ,
1220
+ curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n);
1221
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1222
+ curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n);
1223
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1224
+ )
1225
+ }
1226
+
1227
+ /**
1228
+ * \brief Update MRG32k3a state to skip ahead \p n sequences.
1229
+ *
1230
+ * Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each
1231
+ * sequence is \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly elements long, so this means the function will skip ahead
1232
+ * \xmlonly<ph outputclass="xmlonly">2<sup>127</sup></ph>\endxmlonly * n elements.
1233
+ *
1234
+ * All values of \p n are valid. Large values require more computation and so
1235
+ * will take more time to complete.
1236
+ *
1237
+ * \param n - Number of sequences to skip
1238
+ * \param state - Pointer to state to update
1239
+ */
1240
+ QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state)
1241
+ {
1242
+ unsigned int t[3][3];
1243
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
1244
+ curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n);
1245
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1246
+ curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n);
1247
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1248
+ ,
1249
+ curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n);
1250
+ curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1);
1251
+ curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n);
1252
+ curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2);
1253
+ )
1254
+ }
1255
+
1256
+
1257
+ /**
1258
+ * \brief Initialize MRG32k3a state.
1259
+ *
1260
+ * Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence,
1261
+ * and \p offset.
1262
+ *
1263
+ * All input values of \p seed, \p subsequence, and \p offset are legal.
1264
+ * \p subsequence will be truncated to 51 bits to avoid running into the next sequence
1265
+ *
1266
+ * A value of 0 for \p seed sets the state to the values of the original
1267
+ * published version of the \p MRG32k3a algorithm.
1268
+ *
1269
+ * \param seed - Arbitrary bits to use as a seed
1270
+ * \param subsequence - Subsequence to start at
1271
+ * \param offset - Absolute offset into sequence
1272
+ * \param state - Pointer to state to initialize
1273
+ */
1274
+ QUALIFIERS void curand_init(unsigned long long seed,
1275
+ unsigned long long subsequence,
1276
+ unsigned long long offset,
1277
+ curandStateMRG32k3a_t *state)
1278
+ {
1279
+ int i;
1280
+ for ( i=0; i<3; i++ ) {
1281
+ state->s1[i] = 12345u;
1282
+ state->s2[i] = 12345u;
1283
+ }
1284
+ if (seed != 0ull) {
1285
+ unsigned int x1 = ((unsigned int)seed) ^ 0x55555555UL;
1286
+ unsigned int x2 = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL);
1287
+ state->s1[0] = (unsigned int)curand_MRGmodMul(x1, state->s1[0], MRG32K3A_MOD1);
1288
+ state->s1[1] = (unsigned int)curand_MRGmodMul(x2, state->s1[1], MRG32K3A_MOD1);
1289
+ state->s1[2] = (unsigned int)curand_MRGmodMul(x1, state->s1[2], MRG32K3A_MOD1);
1290
+ state->s2[0] = (unsigned int)curand_MRGmodMul(x2, state->s2[0], MRG32K3A_MOD2);
1291
+ state->s2[1] = (unsigned int)curand_MRGmodMul(x1, state->s2[1], MRG32K3A_MOD2);
1292
+ state->s2[2] = (unsigned int)curand_MRGmodMul(x2, state->s2[2], MRG32K3A_MOD2);
1293
+ }
1294
+ skipahead_subsequence( subsequence, state );
1295
+ skipahead( offset, state );
1296
+ state->boxmuller_flag = 0;
1297
+ state->boxmuller_flag_double = 0;
1298
+ state->boxmuller_extra = 0.f;
1299
+ state->boxmuller_extra_double = 0.;
1300
+ }
1301
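// Editor's sketch, not part of the header: MRG32k3a follows the same pattern as the
// other pseudorandom generators -- here each thread takes its own subsequence of the
// stream selected by the seed. Names are illustrative.
#include <curand_kernel.h>

__global__ void mrg_draw(unsigned int *out, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateMRG32k3a_t s;
    curand_init(seed, /*subsequence=*/tid, /*offset=*/0, &s);
    out[tid] = curand(&s);
}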
+
1302
+ /**
1303
+ * \brief Update Sobol32 state to skip \p n elements.
1304
+ *
1305
+ * Update the Sobol32 state in \p state to skip ahead \p n elements.
1306
+ *
1307
+ * All values of \p n are valid.
1308
+ *
1309
+ * \param n - Number of elements to skip
1310
+ * \param state - Pointer to state to update
1311
+ */
1312
+ template <typename T>
1313
+ QUALIFIERS
1314
+ typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol32_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol32_t*, T>::value>::type
1315
+ skipahead(unsigned int n, T state)
1316
+ {
1317
+ unsigned int i_gray;
1318
+ state->x = state->c;
1319
+ state->i += n;
1320
+ /* Convert state->i to gray code */
1321
+ i_gray = state->i ^ (state->i >> 1);
1322
+ for(unsigned int k = 0; k < 32; k++) {
1323
+ if(i_gray & (1 << k)) {
1324
+ state->x ^= state->direction_vectors[k];
1325
+ }
1326
+ }
1327
+ return;
1328
+ }
1329
+
1330
+ /**
1331
+ * \brief Update Sobol64 state to skip \p n elements.
1332
+ *
1333
+ * Update the Sobol64 state in \p state to skip ahead \p n elements.
1334
+ *
1335
+ * All values of \p n are valid.
1336
+ *
1337
+ * \param n - Number of elements to skip
1338
+ * \param state - Pointer to state to update
1339
+ */
1340
+ template <typename T>
1341
+ QUALIFIERS
1342
+ typename CURAND_STD::enable_if<CURAND_STD::is_same<curandStateSobol64_t*, T>::value || CURAND_STD::is_same<curandStateScrambledSobol64_t*, T>::value>::type
1343
+ skipahead(unsigned long long n, T state)
1344
+ {
1345
+ unsigned long long i_gray;
1346
+ state->x = state->c;
1347
+ state->i += n;
1348
+ /* Convert state->i to gray code */
1349
+ i_gray = state->i ^ (state->i >> 1);
1350
+ for(unsigned k = 0; k < 64; k++) {
1351
+ if(i_gray & (1ULL << k)) {
1352
+ state->x ^= state->direction_vectors[k];
1353
+ }
1354
+ }
1355
+ return;
1356
+ }
1357
+
1358
+ /**
1359
+ * \brief Initialize Sobol32 state.
1360
+ *
1361
+ * Initialize Sobol32 state in \p state with the given \p direction \p vectors and
1362
+ * \p offset.
1363
+ *
1364
+ * The direction vector is a device pointer to an array of 32 unsigned ints.
1365
+ * All input values of \p offset are legal.
1366
+ *
1367
+ * \param direction_vectors - Pointer to array of 32 unsigned ints representing the
1368
+ * direction vectors for the desired dimension
1369
+ * \param offset - Absolute offset into sequence
1370
+ * \param state - Pointer to state to initialize
1371
+ */
1372
+ QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
1373
+ unsigned int offset,
1374
+ curandStateSobol32_t *state)
1375
+ {
1376
+ state->i = 0;
1377
+ state->c = 0;
1378
+ for(int i = 0; i < 32; i++) {
1379
+ state->direction_vectors[i] = direction_vectors[i];
1380
+ }
1381
+ state->x = 0;
1382
+ skipahead<curandStateSobol32_t *>(offset, state);
1383
+ }
1384
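// Editor's sketch, not part of the header: quasirandom generators are initialized per
// dimension from a table of 32 direction vectors. This assumes the vectors have already
// been copied to device memory (for example from the host-side direction vector tables
// shipped with cuRAND); all names below are hypothetical.
#include <curand_kernel.h>

__global__ void sobol32_draw(unsigned int *out,
                             curandDirectionVectors32_t *dir_vectors, // one entry per dimension
                             unsigned int offset, int num_dimensions)
{
    int dim = blockIdx.x * blockDim.x + threadIdx.x;
    if (dim >= num_dimensions) return;
    curandStateSobol32_t s;
    curand_init(dir_vectors[dim], offset, &s);
    out[dim] = curand(&s);   // element `offset` of dimension `dim`
}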
+ /**
1385
+ * \brief Initialize Scrambled Sobol32 state.
1386
+ *
1387
+ * Initialize Sobol32 state in \p state with the given \p direction \p vectors and
1388
+ * \p offset.
1389
+ *
1390
+ * The direction vector is a device pointer to an array of 32 unsigned ints.
1391
+ * All input values of \p offset are legal.
1392
+ *
1393
+ * \param direction_vectors - Pointer to array of 32 unsigned ints representing the
1394
+ direction vectors for the desired dimension
1395
+ * \param scramble_c Scramble constant
1396
+ * \param offset - Absolute offset into sequence
1397
+ * \param state - Pointer to state to initialize
1398
+ */
1399
+ QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors,
1400
+ unsigned int scramble_c,
1401
+ unsigned int offset,
1402
+ curandStateScrambledSobol32_t *state)
1403
+ {
1404
+ state->i = 0;
1405
+ state->c = scramble_c;
1406
+ for(int i = 0; i < 32; i++) {
1407
+ state->direction_vectors[i] = direction_vectors[i];
1408
+ }
1409
+ state->x = state->c;
1410
+ skipahead<curandStateScrambledSobol32_t *>(offset, state);
1411
+ }
1412
+
1413
+ QUALIFIERS int __curand_find_trailing_zero(unsigned int x)
1414
+ {
1415
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
1416
+ int y = __ffs(~x);
1417
+ if(y)
1418
+ return y - 1;
1419
+ return 31;
1420
+ ,
1421
+ int i = 1;
1422
+ while(x & 1) {
1423
+ i++;
1424
+ x >>= 1;
1425
+ }
1426
+ i = i - 1;
1427
+ return i == 32 ? 31 : i;
1428
+ )
1429
+ }
1430
+
1431
+ QUALIFIERS int __curand_find_trailing_zero(unsigned long long x)
1432
+ {
1433
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
1434
+ int y = __ffsll(~x);
1435
+ if(y)
1436
+ return y - 1;
1437
+ return 63;
1438
+ ,
1439
+ int i = 1;
1440
+ while(x & 1) {
1441
+ i++;
1442
+ x >>= 1;
1443
+ }
1444
+ i = i - 1;
1445
+ return i == 64 ? 63 : i;
1446
+ )
1447
+ }
1448
+
1449
+ /**
1450
+ * \brief Initialize Sobol64 state.
1451
+ *
1452
+ * Initialize Sobol64 state in \p state with the given \p direction \p vectors and
1453
+ * \p offset.
1454
+ *
1455
+ * The direction vector is a device pointer to an array of 64 unsigned long longs.
1456
+ * All input values of \p offset are legal.
1457
+ *
1458
+ * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
1459
+ direction vectors for the desired dimension
1460
+ * \param offset - Absolute offset into sequence
1461
+ * \param state - Pointer to state to initialize
1462
+ */
1463
+ QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
1464
+ unsigned long long offset,
1465
+ curandStateSobol64_t *state)
1466
+ {
1467
+ state->i = 0;
1468
+ state->c = 0;
1469
+ for(int i = 0; i < 64; i++) {
1470
+ state->direction_vectors[i] = direction_vectors[i];
1471
+ }
1472
+ state->x = 0;
1473
+ skipahead<curandStateSobol64_t *>(offset, state);
1474
+ }
1475
+
1476
+ /**
1477
+ * \brief Initialize Scrambled Sobol64 state.
1478
+ *
1479
+ * Initialize Sobol64 state in \p state with the given \p direction \p vectors and
1480
+ * \p offset.
1481
+ *
1482
+ * The direction vector is a device pointer to an array of 64 unsigned long longs.
1483
+ * All input values of \p offset are legal.
1484
+ *
1485
+ * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the
1486
+ direction vectors for the desired dimension
1487
+ * \param scramble_c Scramble constant
1488
+ * \param offset - Absolute offset into sequence
1489
+ * \param state - Pointer to state to initialize
1490
+ */
1491
+ QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors,
1492
+ unsigned long long scramble_c,
1493
+ unsigned long long offset,
1494
+ curandStateScrambledSobol64_t *state)
1495
+ {
1496
+ state->i = 0;
1497
+ state->c = scramble_c;
1498
+ for(int i = 0; i < 64; i++) {
1499
+ state->direction_vectors[i] = direction_vectors[i];
1500
+ }
1501
+ state->x = state->c;
1502
+ skipahead<curandStateScrambledSobol64_t *>(offset, state);
1503
+ }
1504
+
1505
+ /**
1506
+ * \brief Return 32-bits of quasirandomness from a Sobol32 generator.
1507
+ *
1508
+ * Return 32-bits of quasirandomness from the Sobol32 generator in \p state,
1509
+ * increment position of generator by one.
1510
+ *
1511
+ * \param state - Pointer to state to update
1512
+ *
1513
+ * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
1514
+ */
1515
+
1516
+ QUALIFIERS unsigned int curand(curandStateSobol32_t * state)
1517
+ {
1518
+ /* Moving from i to i+1 element in gray code is flipping one bit,
1519
+ the trailing zero bit of i
1520
+ */
1521
+ unsigned int res = state->x;
1522
+ state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
1523
+ state->i ++;
1524
+ return res;
1525
+ }
1526
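// Editor's aside, not part of the header: a small host-only check of the property the
// Sobol generators above rely on -- consecutive Gray codes differ in exactly the bit at
// the lowest zero bit of i, which is what __curand_find_trailing_zero() locates.
#include <cassert>

static unsigned int gray(unsigned int i) { return i ^ (i >> 1); }
static int lowest_zero_bit(unsigned int x) { int k = 0; while (x & 1u) { x >>= 1; ++k; } return k; }

int main()
{
    for (unsigned int i = 0; i < (1u << 20); ++i) {
        assert((gray(i) ^ gray(i + 1)) == (1u << lowest_zero_bit(i)));
    }
    return 0;
}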
+
1527
+ /**
1528
+ * \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator.
1529
+ *
1530
+ * Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
1531
+ * increment position of generator by one.
1532
+ *
1533
+ * \param state - Pointer to state to update
1534
+ *
1535
+ * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use.
1536
+ */
1537
+
1538
+ QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state)
1539
+ {
1540
+ /* Moving from i to i+1 element in gray code is flipping one bit,
1541
+ the trailing zero bit of i
1542
+ */
1543
+ unsigned int res = state->x;
1544
+ state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
1545
+ state->i ++;
1546
+ return res;
1547
+ }
1548
+
1549
+ /**
1550
+ * \brief Return 64-bits of quasirandomness from a Sobol64 generator.
1551
+ *
1552
+ * Return 64-bits of quasirandomness from the Sobol64 generator in \p state,
1553
+ * increment position of generator by one.
1554
+ *
1555
+ * \param state - Pointer to state to update
1556
+ *
1557
+ * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
1558
+ */
1559
+
1560
+ QUALIFIERS unsigned long long curand(curandStateSobol64_t * state)
1561
+ {
1562
+ /* Moving from i to i+1 element in gray code is flipping one bit,
1563
+ the trailing zero bit of i
1564
+ */
1565
+ unsigned long long res = state->x;
1566
+ state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
1567
+ state->i ++;
1568
+ return res;
1569
+ }
1570
+
1571
+ /**
1572
+ * \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator.
1573
+ *
1574
+ * Return 64-bits of quasirandomness from the scrambled Sobol32 generator in \p state,
1575
+ * increment position of generator by one.
1576
+ *
1577
+ * \param state - Pointer to state to update
1578
+ *
1579
+ * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use.
1580
+ */
1581
+
1582
+ QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state)
1583
+ {
1584
+ /* Moving from i to i+1 element in gray code is flipping one bit,
1585
+ the trailing zero bit of i
1586
+ */
1587
+ unsigned long long res = state->x;
1588
+ state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)];
1589
+ state->i ++;
1590
+ return res;
1591
+ }
1592
+
1593
+ #include "curand_uniform.h"
1594
+ #include "curand_normal.h"
1595
+ #include "curand_lognormal.h"
1596
+ #include "curand_poisson.h"
1597
+ #include "curand_discrete2.h"
1598
+
1599
+ __device__ static inline unsigned int *__get_precalculated_matrix(int n)
1600
+ {
1601
+ if(n == 0) {
1602
+ return precalc_xorwow_matrix[n];
1603
+ }
1604
+ if(n == 2) {
1605
+ return precalc_xorwow_offset_matrix[n];
1606
+ }
1607
+ return precalc_xorwow_matrix[n];
1608
+ }
1609
+
1610
+ #ifndef __CUDACC_RTC__
1611
+ __host__ static inline unsigned int *__get_precalculated_matrix_host(int n)
1612
+ {
1613
+ if(n == 1) {
1614
+ return precalc_xorwow_matrix_host[n];
1615
+ }
1616
+ if(n == 3) {
1617
+ return precalc_xorwow_offset_matrix_host[n];
1618
+ }
1619
+ return precalc_xorwow_matrix_host[n];
1620
+ }
1621
+ #endif // #ifndef __CUDACC_RTC__
1622
+
1623
+ __device__ static inline unsigned int *__get_mrg32k3a_matrix(int n)
1624
+ {
1625
+ if(n == 0) {
1626
+ return mrg32k3aM1[n][0];
1627
+ }
1628
+ if(n == 2) {
1629
+ return mrg32k3aM2[n][0];
1630
+ }
1631
+ if(n == 4) {
1632
+ return mrg32k3aM1SubSeq[n][0];
1633
+ }
1634
+ if(n == 6) {
1635
+ return mrg32k3aM2SubSeq[n][0];
1636
+ }
1637
+ if(n == 8) {
1638
+ return mrg32k3aM1Seq[n][0];
1639
+ }
1640
+ if(n == 10) {
1641
+ return mrg32k3aM2Seq[n][0];
1642
+ }
1643
+ return mrg32k3aM1[n][0];
1644
+ }
1645
+
1646
+ #ifndef __CUDACC_RTC__
1647
+ __host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n)
1648
+ {
1649
+ if(n == 1) {
1650
+ return mrg32k3aM1Host[n][0];
1651
+ }
1652
+ if(n == 3) {
1653
+ return mrg32k3aM2Host[n][0];
1654
+ }
1655
+ if(n == 5) {
1656
+ return mrg32k3aM1SubSeqHost[n][0];
1657
+ }
1658
+ if(n == 7) {
1659
+ return mrg32k3aM2SubSeqHost[n][0];
1660
+ }
1661
+ if(n == 9) {
1662
+ return mrg32k3aM1SeqHost[n][0];
1663
+ }
1664
+ if(n == 11) {
1665
+ return mrg32k3aM2SeqHost[n][0];
1666
+ }
1667
+ return mrg32k3aM1Host[n][0];
1668
+ }
1669
+
1670
+ __host__ static inline double *__get__cr_lgamma_table_host(void) {
1671
+ return __cr_lgamma_table;
1672
+ }
1673
+ #endif // #ifndef __CUDACC_RTC__
1674
+
1675
+ /** @} */
1676
+
1677
+ #endif // !defined(CURAND_KERNEL_H_)
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_lognormal.h ADDED
@@ -0,0 +1,697 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_LOGNORMAL_H_)
52
+ #define CURAND_LOGNORMAL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+ /**
69
+ * \brief Return a log-normally distributed float from an XORWOW generator.
70
+ *
71
+ * Return a single log-normally distributed float derived from a normal
72
+ * distribution with mean \p mean and standard deviation \p stddev
73
+ * from the XORWOW generator in \p state,
74
+ * increment position of generator by one.
75
+ *
76
+ * The implementation uses a Box-Muller transform to generate two
77
+ * normally distributed results, transforms them to log-normal distribution,
78
+ * then returns them one at a time.
79
+ * See ::curand_log_normal2() for a more efficient version that returns
80
+ * both results at once.
81
+ *
82
+ * \param state - Pointer to state to update
83
+ * \param mean - Mean of the related normal distribution
84
+ * \param stddev - Standard deviation of the related normal distribution
85
+ *
86
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
87
+ */
88
+ QUALIFIERS float curand_log_normal(curandStateXORWOW_t *state, float mean, float stddev)
89
+ {
90
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
91
+ unsigned int x, y;
92
+ x = curand(state);
93
+ y = curand(state);
94
+ float2 v = _curand_box_muller(x, y);
95
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
96
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
97
+ return expf(mean + (stddev * v.x));
98
+ }
99
+ state->boxmuller_flag = 0;
100
+ return state->boxmuller_extra;
101
+ }
102
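// Editor's sketch, not part of the header: sampling log-normal floats from persistent
// per-thread XORWOW states (initialized elsewhere with curand_init). Names are
// illustrative.
#include <curand_kernel.h>

__global__ void sample_log_normal(float *out, curandStateXORWOW_t *states,
                                  float mean, float stddev, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateXORWOW_t s = states[tid];            // work on a register copy
    out[tid] = curand_log_normal(&s, mean, stddev);
    states[tid] = s;                                // write the advanced state back
}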
+
103
+ /**
104
+ * \brief Return a log-normally distributed float from a Philox4_32_10 generator.
105
+ *
106
+ * Return a single log-normally distributed float derived from a normal
107
+ * distribution with mean \p mean and standard deviation \p stddev
108
+ * from the Philox4_32_10 generator in \p state,
109
+ * increment position of generator by one.
110
+ *
111
+ * The implementation uses a Box-Muller transform to generate two
112
+ * normally distributed results, transforms them to log-normal distribution,
113
+ * then returns them one at a time.
114
+ * See ::curand_log_normal2() for a more efficient version that returns
115
+ * both results at once.
116
+ *
117
+ * \param state - Pointer to state to update
118
+ * \param mean - Mean of the related normal distribution
119
+ * \param stddev - Standard deviation of the related normal distribution
120
+ *
121
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
122
+ */
123
+
124
+ QUALIFIERS float curand_log_normal(curandStatePhilox4_32_10_t *state, float mean, float stddev)
125
+ {
126
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
127
+ unsigned int x, y;
128
+ x = curand(state);
129
+ y = curand(state);
130
+ float2 v = _curand_box_muller(x, y);
131
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
132
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
133
+ return expf(mean + (stddev * v.x));
134
+ }
135
+ state->boxmuller_flag = 0;
136
+ return state->boxmuller_extra;
137
+ }
138
+
139
+ /**
140
+ * \brief Return two log-normally distributed floats from an XORWOW generator.
141
+ *
142
+ * Return two log-normally distributed floats derived from a normal
143
+ * distribution with mean \p mean and standard deviation \p stddev
144
+ * from the XORWOW generator in \p state,
145
+ * increment position of generator by two.
146
+ *
147
+ * The implementation uses a Box-Muller transform to generate two
148
+ * normally distributed results, then transforms them to log-normal.
149
+ *
150
+ * \param state - Pointer to state to update
151
+ * \param mean - Mean of the related normal distribution
152
+ * \param stddev - Standard deviation of the related normal distribution
153
+ *
154
+ * \return Log-normally distributed float2 where each element is from a
155
+ * distribution with mean \p mean and standard deviation \p stddev
156
+ */
157
+ QUALIFIERS float2 curand_log_normal2(curandStateXORWOW_t *state, float mean, float stddev)
158
+ {
159
+ float2 v = curand_box_muller(state);
160
+ v.x = expf(mean + (stddev * v.x));
161
+ v.y = expf(mean + (stddev * v.y));
162
+ return v;
163
+ }
164
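// Editor's sketch, not part of the header: when an even number of samples is needed,
// the curand_log_normal2() above avoids caching the second Box-Muller result. Names
// are illustrative.
#include <curand_kernel.h>

__global__ void sample_log_normal_pairs(float2 *out, curandStateXORWOW_t *states,
                                        float mean, float stddev, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateXORWOW_t s = states[tid];
    out[tid] = curand_log_normal2(&s, mean, stddev);  // two samples per call
    states[tid] = s;
}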
+
165
+ /**
166
+ * \brief Return two log-normally distributed floats from a Philox4_32_10 generator.
167
+ *
168
+ * Return two log-normally distributed floats derived from a normal
169
+ * distribution with mean \p mean and standard deviation \p stddev
170
+ * from the Philox4_32_10 generator in \p state,
171
+ * increment position of generator by two.
172
+ *
173
+ * The implementation uses a Box-Muller transform to generate two
174
+ * normally distributed results, then transforms them to log-normal.
175
+ *
176
+ * \param state - Pointer to state to update
177
+ * \param mean - Mean of the related normal distribution
178
+ * \param stddev - Standard deviation of the related normal distribution
179
+ *
180
+ * \return Log-normally distributed float2 where each element is from a
181
+ * distribution with mean \p mean and standard deviation \p stddev
182
+ */
183
+ QUALIFIERS float2 curand_log_normal2(curandStatePhilox4_32_10_t *state, float mean, float stddev)
184
+ {
185
+ float2 v = curand_box_muller(state);
186
+ v.x = expf(mean + (stddev * v.x));
187
+ v.y = expf(mean + (stddev * v.y));
188
+ return v;
189
+ }
190
+ /**
191
+ * \brief Return four log-normally distributed floats from a Philox4_32_10 generator.
192
+ *
193
+ * Return four log-normally distributed floats derived from a normal
194
+ * distribution with mean \p mean and standard deviation \p stddev
195
+ * from the Philox4_32_10 generator in \p state,
196
+ * increment position of generator by four.
197
+ *
198
+ * The implementation uses a Box-Muller transform to generate two
199
+ * normally distributed results, then transforms them to log-normal.
200
+ *
201
+ * \param state - Pointer to state to update
202
+ * \param mean - Mean of the related normal distribution
203
+ * \param stddev - Standard deviation of the related normal distribution
204
+ *
205
+ * \return Log-normally distributed float4 where each element is from a
206
+ * distribution with mean \p mean and standard deviation \p stddev
207
+ */
208
+ QUALIFIERS float4 curand_log_normal4(curandStatePhilox4_32_10_t *state, float mean, float stddev)
209
+ {
210
+ float4 v = curand_box_muller4(state);
211
+ v.x = expf(mean + (stddev * v.x));
212
+ v.y = expf(mean + (stddev * v.y));
213
+ v.z = expf(mean + (stddev * v.z));
214
+ v.w = expf(mean + (stddev * v.w));
215
+ return v;
216
+ }
217
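// Editor's sketch, not part of the header: the Philox float4 path above produces four
// log-normal floats from a single call. Names are illustrative.
#include <curand_kernel.h>

__global__ void sample_log_normal4(float4 *out, unsigned long long seed,
                                   float mean, float stddev, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStatePhilox4_32_10_t s;
    curand_init(seed, /*subsequence=*/tid, /*offset=*/0, &s);
    out[tid] = curand_log_normal4(&s, mean, stddev);
}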
+
218
+ /**
219
+ * \brief Return a log-normally distributed float from an MRG32k3a generator.
220
+ *
221
+ * Return a single log-normally distributed float derived from a normal
222
+ * distribution with mean \p mean and standard deviation \p stddev
223
+ * from the MRG32k3a generator in \p state,
224
+ * increment position of generator by one.
225
+ *
226
+ * The implementation uses a Box-Muller transform to generate two
227
+ * normally distributed results, transforms them to log-normal distribution,
228
+ * then returns them one at a time.
229
+ * See ::curand_log_normal2() for a more efficient version that returns
230
+ * both results at once.
231
+ *
232
+ * \param state - Pointer to state to update
233
+ * \param mean - Mean of the related normal distribution
234
+ * \param stddev - Standard deviation of the related normal distribution
235
+ *
236
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
237
+ */
238
+ QUALIFIERS float curand_log_normal(curandStateMRG32k3a_t *state, float mean, float stddev)
239
+ {
240
+ if(state->boxmuller_flag != EXTRA_FLAG_LOG_NORMAL) {
241
+ float2 v = curand_box_muller_mrg(state);
242
+ state->boxmuller_extra = expf(mean + (stddev * v.y));
243
+ state->boxmuller_flag = EXTRA_FLAG_LOG_NORMAL;
244
+ return expf(mean + (stddev * v.x));
245
+ }
246
+ state->boxmuller_flag = 0;
247
+ return state->boxmuller_extra;
248
+ }
249
+
250
+ /**
251
+ * \brief Return two log-normally distributed floats from an MRG32k3a generator.
252
+ *
253
+ * Return two log-normally distributed floats derived from a normal
254
+ * distribution with mean \p mean and standard deviation \p stddev
255
+ * from the MRG32k3a generator in \p state,
256
+ * increment position of generator by two.
257
+ *
258
+ * The implementation uses a Box-Muller transform to generate two
259
+ * normally distributed results, then transforms them to log-normal.
260
+ *
261
+ * \param state - Pointer to state to update
262
+ * \param mean - Mean of the related normal distribution
263
+ * \param stddev - Standard deviation of the related normal distribution
264
+ *
265
+ * \return Log-normally distributed float2 where each element is from a
266
+ * distribution with mean \p mean and standard deviation \p stddev
267
+ */
268
+ QUALIFIERS float2 curand_log_normal2(curandStateMRG32k3a_t *state, float mean, float stddev)
269
+ {
270
+ float2 v = curand_box_muller_mrg(state);
271
+ v.x = expf(mean + (stddev * v.x));
272
+ v.y = expf(mean + (stddev * v.y));
273
+ return v;
274
+ }
275
+
276
+ /**
277
+ * \brief Return a log-normally distributed float from an MTGP32 generator.
278
+ *
279
+ * Return a single log-normally distributed float derived from a normal
280
+ * distribution with mean \p mean and standard deviation \p stddev
281
+ * from the MTGP32 generator in \p state,
282
+ * increment position of generator.
283
+ *
284
+ * The implementation uses the inverse cumulative distribution function
285
+ * to generate a normally distributed result, then transforms the result
286
+ * to log-normal.
287
+ *
288
+ * \param state - Pointer to state to update
289
+ * \param mean - Mean of the related normal distribution
290
+ * \param stddev - Standard deviation of the related normal distribution
291
+ *
292
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
293
+ */
294
+ QUALIFIERS float curand_log_normal(curandStateMtgp32_t *state, float mean, float stddev)
295
+ {
296
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
297
+ }
298
+
299
+ /**
300
+ * \brief Return a log-normally distributed float from a Sobol32 generator.
301
+ *
302
+ * Return a single log-normally distributed float derived from a normal
303
+ * distribution with mean \p mean and standard deviation \p stddev
304
+ * from the Sobol32 generator in \p state,
305
+ * increment position of generator by one.
306
+ *
307
+ * The implementation uses the inverse cumulative distribution function
308
+ * to generate a normally distributed result, then transforms the result
309
+ * to log-normal.
310
+ *
311
+ * \param state - Pointer to state to update
312
+ * \param mean - Mean of the related normal distribution
313
+ * \param stddev - Standard deviation of the related normal distribution
314
+ *
315
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
316
+ */
317
+ QUALIFIERS float curand_log_normal(curandStateSobol32_t *state, float mean, float stddev)
318
+ {
319
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
320
+ }
321
+ /**
322
+ * \brief Return a log-normally distributed float from a scrambled Sobol32 generator.
323
+ *
324
+ * Return a single log-normally distributed float derived from a normal
325
+ * distribution with mean \p mean and standard deviation \p stddev
326
+ * from the scrambled Sobol32 generator in \p state,
327
+ * increment position of generator by one.
328
+ *
329
+ * The implementation uses the inverse cumulative distribution function
330
+ * to generate a normally distributed result, then transforms the result
331
+ * to log-normal.
332
+ *
333
+ * \param state - Pointer to state to update
334
+ * \param mean - Mean of the related normal distribution
335
+ * \param stddev - Standard deviation of the related normal distribution
336
+ *
337
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
338
+ */
339
+ QUALIFIERS float curand_log_normal(curandStateScrambledSobol32_t *state, float mean, float stddev)
340
+ {
341
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
342
+ }
343
+
344
+ /**
345
+ * \brief Return a log-normally distributed float from a Sobol64 generator.
346
+ *
347
+ * Return a single log-normally distributed float derived from a normal
348
+ * distribution with mean \p mean and standard deviation \p stddev
349
+ * from the Sobol64 generator in \p state,
350
+ * increment position of generator by one.
351
+ *
352
+ * The implementation uses the inverse cumulative distribution function
353
+ * to generate normally distributed results, then converts to log-normal
354
+ * distribution.
355
+ *
356
+ * \param state - Pointer to state to update
357
+ * \param mean - Mean of the related normal distribution
358
+ * \param stddev - Standard deviation of the related normal distribution
359
+ *
360
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
361
+ */
362
+ QUALIFIERS float curand_log_normal(curandStateSobol64_t *state, float mean, float stddev)
363
+ {
364
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
365
+ }
366
+
367
+ /**
368
+ * \brief Return a log-normally distributed float from a scrambled Sobol64 generator.
369
+ *
370
+ * Return a single log-normally distributed float derived from a normal
371
+ * distribution with mean \p mean and standard deviation \p stddev
372
+ * from the scrambled Sobol64 generator in \p state,
373
+ * increment position of generator by one.
374
+ *
375
+ * The implementation uses the inverse cumulative distribution function
376
+ * to generate normally distributed results, then converts to log-normal
377
+ * distribution.
378
+ *
379
+ * \param state - Pointer to state to update
380
+ * \param mean - Mean of the related normal distribution
381
+ * \param stddev - Standard deviation of the related normal distribution
382
+ *
383
+ * \return Log-normally distributed float with mean \p mean and standard deviation \p stddev
384
+ */
385
+ QUALIFIERS float curand_log_normal(curandStateScrambledSobol64_t *state, float mean, float stddev)
386
+ {
387
+ return expf(mean + (stddev * _curand_normal_icdf(curand(state))));
388
+ }
389
+
390
+ /**
391
+ * \brief Return a log-normally distributed double from an XORWOW generator.
392
+ *
393
+ * Return a single log-normally distributed double derived from a normal
394
+ * distribution with mean \p mean and standard deviation \p stddev
395
+ * from the XORWOW generator in \p state,
396
+ * increment position of generator.
397
+ *
398
+ * The implementation uses a Box-Muller transform to generate two
399
+ * normally distributed results, transforms them to log-normal distribution,
400
+ * then returns them one at a time.
401
+ * See ::curand_log_normal2_double() for a more efficient version that returns
402
+ * both results at once.
403
+ *
404
+ * \param state - Pointer to state to update
405
+ * \param mean - Mean of the related normal distribution
406
+ * \param stddev - Standard deviation of the related normal distribution
407
+ *
408
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
409
+ */
410
+
411
+ QUALIFIERS double curand_log_normal_double(curandStateXORWOW_t *state, double mean, double stddev)
412
+ {
413
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
414
+ unsigned int x0, x1, y0, y1;
415
+ x0 = curand(state);
416
+ x1 = curand(state);
417
+ y0 = curand(state);
418
+ y1 = curand(state);
419
+ double2 v = _curand_box_muller_double(x0, x1, y0, y1);
420
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
421
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
422
+ return exp(mean + (stddev * v.x));
423
+ }
424
+ state->boxmuller_flag_double = 0;
425
+ return state->boxmuller_extra_double;
426
+ }
427
+
428
+ /**
429
+ * \brief Return a log-normally distributed double from a Philox4_32_10 generator.
430
+ *
431
+ * Return a single log-normally distributed double derived from a normal
432
+ * distribution with mean \p mean and standard deviation \p stddev
433
+ * from the Philox4_32_10 generator in \p state,
434
+ * increment position of generator.
435
+ *
436
+ * The implementation uses a Box-Muller transform to generate two
437
+ * normally distributed results, transforms them to log-normal distribution,
438
+ * then returns them one at a time.
439
+ * See ::curand_log_normal2_double() for a more efficient version that returns
440
+ * both results at once.
441
+ *
442
+ * \param state - Pointer to state to update
443
+ * \param mean - Mean of the related normal distribution
444
+ * \param stddev - Standard deviation of the related normal distribution
445
+ *
446
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
447
+ */
448
+
449
+ QUALIFIERS double curand_log_normal_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
450
+ {
451
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
452
+ uint4 _x;
453
+ _x = curand4(state);
454
+ double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
455
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
456
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
457
+ return exp(mean + (stddev * v.x));
458
+ }
459
+ state->boxmuller_flag_double = 0;
460
+ return state->boxmuller_extra_double;
461
+ }
462
+
463
+
464
+ /**
465
+ * \brief Return two log-normally distributed doubles from an XORWOW generator.
466
+ *
467
+ * Return two log-normally distributed doubles derived from a normal
468
+ * distribution with mean \p mean and standard deviation \p stddev
469
+ * from the XORWOW generator in \p state,
470
+ * increment position of generator by two.
471
+ *
472
+ * The implementation uses a Box-Muller transform to generate two
473
+ * normally distributed results, then transforms them to a log-normal distribution.
474
+ *
475
+ * \param state - Pointer to state to update
476
+ * \param mean - Mean of the related normal distribution
477
+ * \param stddev - Standard deviation of the related normal distribution
478
+ *
479
+ * \return Log-normally distributed double2 where each element is from a
480
+ * distribution with mean \p mean and standard deviation \p stddev
481
+ */
482
+ QUALIFIERS double2 curand_log_normal2_double(curandStateXORWOW_t *state, double mean, double stddev)
483
+ {
484
+ double2 v = curand_box_muller_double(state);
485
+ v.x = exp(mean + (stddev * v.x));
486
+ v.y = exp(mean + (stddev * v.y));
487
+ return v;
488
+ }
489
+
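Example (editor's sketch, not part of this header): a minimal kernel using the XORWOW state and the two-at-a-time variant documented above. The kernel name, output buffer, and launch geometry are hypothetical, and the translation unit is assumed to include curand_kernel.h.

__global__ void fill_lognormal_xorwow(double *out, size_t n,
                                      unsigned long long seed,
                                      double mean, double stddev)
{
    size_t tid    = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)gridDim.x * blockDim.x;
    curandStateXORWOW_t state;
    curand_init(seed, tid, 0, &state);           /* one subsequence per thread */
    for (size_t i = 2 * tid; i + 1 < n; i += 2 * stride) {  /* assumes n even */
        double2 v = curand_log_normal2_double(&state, mean, stddev);
        out[i]     = v.x;                        /* exp(mean + stddev * z0) */
        out[i + 1] = v.y;                        /* exp(mean + stddev * z1) */
    }
}

Using curand_log_normal2_double here avoids the per-call flag check that the single-value overload performs to hand back its cached second result.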
490
+ /**
491
+ * \brief Return two log-normally distributed doubles from a Philox4_32_10 generator.
492
+ *
493
+ * Return two log-normally distributed doubles derived from a normal
494
+ * distribution with mean \p mean and standard deviation \p stddev
495
+ * from the Philox4_32_10 generator in \p state,
496
+ * increment position of generator by four.
497
+ *
498
+ * The implementation uses a Box-Muller transform to generate two
499
+ * normally distributed results, and transforms them to a log-normal distribution.
500
+ *
501
+ * \param state - Pointer to state to update
502
+ * \param mean - Mean of the related normal distribution
503
+ * \param stddev - Standard deviation of the related normal distribution
504
+ *
505
+ * \return Log-normally distributed double2 where each element is from a
506
+ * distribution with mean \p mean and standard deviation \p stddev
507
+ */
508
+ QUALIFIERS double2 curand_log_normal2_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
509
+ {
510
+ double2 v = curand_box_muller2_double(state);
511
+ v.x = exp(mean + (stddev * v.x));
512
+ v.y = exp(mean + (stddev * v.y));
513
+ return v;
514
+ }
515
+ // not part of the API
516
+ QUALIFIERS double4 curand_log_normal4_double(curandStatePhilox4_32_10_t *state, double mean, double stddev)
517
+ {
518
+ double4 v = curand_box_muller4_double(state);
519
+ v.x = exp(mean + (stddev * v.x));
520
+ v.y = exp(mean + (stddev * v.y));
521
+ v.z = exp(mean + (stddev * v.z));
522
+ v.w = exp(mean + (stddev * v.w));
523
+ return v;
524
+ }
525
+
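A companion sketch for the Philox state (the helper name is hypothetical): each call to the double2 variant consumes one 4x32-bit Philox draw and yields two finished values, so four values cost exactly two calls and no cached-value bookkeeping.

__device__ void draw_four_lognormal(curandStatePhilox4_32_10_t *state,
                                    double mean, double stddev, double out[4])
{
    double2 a = curand_log_normal2_double(state, mean, stddev);  /* advances counter by 4 */
    double2 b = curand_log_normal2_double(state, mean, stddev);
    out[0] = a.x; out[1] = a.y;
    out[2] = b.x; out[3] = b.y;
}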
526
+ /**
527
+ * \brief Return a log-normally distributed double from an MRG32k3a generator.
528
+ *
529
+ * Return a single log-normally distributed double derived from a normal
530
+ * distribution with mean \p mean and standard deviation \p stddev
531
+ * from the MRG32k3a generator in \p state,
532
+ * increment position of generator.
533
+ *
534
+ * The implementation uses a Box-Muller transform to generate two
535
+ * normally distributed results, transforms them to log-normal distribution,
536
+ * then returns them one at a time.
537
+ * See ::curand_log_normal2_double() for a more efficient version that returns
538
+ * both results at once.
539
+ *
540
+ * \param state - Pointer to state to update
541
+ * \param mean - Mean of the related normal distribution
542
+ * \param stddev - Standard deviation of the related normal distribution
543
+ *
544
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
545
+ */
546
+ QUALIFIERS double curand_log_normal_double(curandStateMRG32k3a_t *state, double mean, double stddev)
547
+ {
548
+ if(state->boxmuller_flag_double != EXTRA_FLAG_LOG_NORMAL) {
549
+ double2 v = curand_box_muller_mrg_double(state);
550
+ state->boxmuller_extra_double = exp(mean + (stddev * v.y));
551
+ state->boxmuller_flag_double = EXTRA_FLAG_LOG_NORMAL;
552
+ return exp(mean + (stddev * v.x));
553
+ }
554
+ state->boxmuller_flag_double = 0;
555
+ return state->boxmuller_extra_double;
556
+ }
557
+
558
+ /**
559
+ * \brief Return two log-normally distributed doubles from an MRG32k3a generator.
560
+ *
561
+ * Return two log-normally distributed doubles derived from a normal
562
+ * distribution with mean \p mean and standard deviation \p stddev
563
+ * from the MRG32k3a generator in \p state,
564
+ * increment position of generator by two.
565
+ *
566
+ * The implementation uses a Box-Muller transform to generate two
567
+ * normally distributed results, and transforms them to a log-normal distribution.
568
+ *
569
+ * \param state - Pointer to state to update
570
+ * \param mean - Mean of the related normal distribution
571
+ * \param stddev - Standard deviation of the related normal distribution
572
+ *
573
+ * \return Log-normally distributed double2 where each element is from a
574
+ * distribution with mean \p mean and standard deviation \p stddev
575
+ */
576
+ QUALIFIERS double2 curand_log_normal2_double(curandStateMRG32k3a_t *state, double mean, double stddev)
577
+ {
578
+ double2 v = curand_box_muller_mrg_double(state);
579
+ v.x = exp(mean + (stddev * v.x));
580
+ v.y = exp(mean + (stddev * v.y));
581
+ return v;
582
+ }
583
+
584
+ /**
585
+ * \brief Return a log-normally distributed double from an MTGP32 generator.
586
+ *
587
+ * Return a single log-normally distributed double derived from a normal
588
+ * distribution with mean \p mean and standard deviation \p stddev
589
+ * from the MTGP32 generator in \p state,
590
+ * increment position of generator.
591
+ *
592
+ * The implementation uses the inverse cumulative distribution function
593
+ * to generate normally distributed results, and transforms them into
594
+ * log-normal distribution.
595
+ *
596
+ * \param state - Pointer to state to update
597
+ * \param mean - Mean of the related normal distribution
598
+ * \param stddev - Standard deviation of the related normal distribution
599
+ *
600
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
601
+ */
602
+ QUALIFIERS double curand_log_normal_double(curandStateMtgp32_t *state, double mean, double stddev)
603
+ {
604
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
605
+ }
606
+
607
+ /**
608
+ * \brief Return a log-normally distributed double from a Sobol32 generator.
609
+ *
610
+ * Return a single log-normally distributed double derived from a normal
611
+ * distribution with mean \p mean and standard deviation \p stddev
612
+ * from the Sobol32 generator in \p state,
613
+ * increment position of generator by one.
614
+ *
615
+ * The implementation uses the inverse cumulative distribution function
616
+ * to generate normally distributed results, and transforms them into
617
+ * log-normal distribution.
618
+ *
619
+ * \param state - Pointer to state to update
620
+ * \param mean - Mean of the related normal distribution
621
+ * \param stddev - Standard deviation of the related normal distribution
622
+ *
623
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
624
+ */
625
+ QUALIFIERS double curand_log_normal_double(curandStateSobol32_t *state, double mean, double stddev)
626
+ {
627
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
628
+ }
629
+
630
+ /**
631
+ * \brief Return a log-normally distributed double from a scrambled Sobol32 generator.
632
+ *
633
+ * Return a single log-normally distributed double derived from a normal
634
+ * distribution with mean \p mean and standard deviation \p stddev
635
+ * from the scrambled Sobol32 generator in \p state,
636
+ * increment position of generator by one.
637
+ *
638
+ * The implementation uses the inverse cumulative distribution function
639
+ * to generate normally distributed results, and transforms them into
640
+ * log-normal distribution.
641
+ *
642
+ * \param state - Pointer to state to update
643
+ * \param mean - Mean of the related normal distribution
644
+ * \param stddev - Standard deviation of the related normal distribution
645
+ *
646
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
647
+ */
648
+ QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol32_t *state, double mean, double stddev)
649
+ {
650
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
651
+ }
652
+
653
+ /**
654
+ * \brief Return a log-normally distributed double from a Sobol64 generator.
655
+ *
656
+ * Return a single log-normally distributed double derived from a normal
657
+ * distribution with mean \p mean and standard deviation \p stddev
658
+ * from the Sobol64 generator in \p state,
659
+ * increment position of generator by one.
660
+ *
661
+ * The implementation uses the inverse cumulative distribution function
662
+ * to generate normally distributed results, and transforms them into a log-normal distribution.
663
+ *
664
+ * \param state - Pointer to state to update
665
+ * \param mean - Mean of the related normal distribution
666
+ * \param stddev - Standard deviation of the related normal distribution
667
+ *
668
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
669
+ */
670
+ QUALIFIERS double curand_log_normal_double(curandStateSobol64_t *state, double mean, double stddev)
671
+ {
672
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
673
+ }
674
+
675
+ /**
676
+ * \brief Return a log-normally distributed double from a scrambled Sobol64 generator.
677
+ *
678
+ * Return a single log-normally distributed double derived from a normal
679
+ * distribution with mean \p mean and standard deviation \p stddev
680
+ * from the scrambled Sobol64 generator in \p state,
681
+ * increment position of generator by one.
682
+ *
683
+ * The implementation uses the inverse cumulative distribution function
684
+ * to generate normally distributed results, and transforms them into a log-normal distribution.
685
+ *
686
+ * \param state - Pointer to state to update
687
+ * \param mean - Mean of the related normal distribution
688
+ * \param stddev - Standard deviation of the related normal distribution
689
+ *
690
+ * \return Log-normally distributed double with mean \p mean and standard deviation \p stddev
691
+ */
692
+ QUALIFIERS double curand_log_normal_double(curandStateScrambledSobol64_t *state, double mean, double stddev)
693
+ {
694
+ return exp(mean + (stddev * _curand_normal_icdf_double(curand(state))));
695
+ }
696
+
697
+ #endif // !defined(CURAND_LOGNORMAL_H_)
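For bulk generation without writing a kernel, the host-side cuRAND API provides an equivalent routine. The sketch below is an editor's illustration, not part of this header; it assumes d_out is a previously allocated device buffer of n doubles (n even) and keeps error handling minimal.

#include <curand.h>              /* host API, separate from this device header */

curandStatus_t fill_lognormal_host(double *d_out, size_t n,
                                   unsigned long long seed,
                                   double mean, double stddev)
{
    curandGenerator_t gen;
    curandStatus_t st = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
    if (st != CURAND_STATUS_SUCCESS) return st;
    st = curandSetPseudoRandomGeneratorSeed(gen, seed);
    if (st == CURAND_STATUS_SUCCESS)
        st = curandGenerateLogNormalDouble(gen, d_out, n, mean, stddev);
    curandDestroyGenerator(gen);
    return st;
}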
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32.h ADDED
@@ -0,0 +1,210 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CURAND_MTGP32_H
51
+ #define CURAND_MTGP32_H
52
+ /*
53
+ * @file curand_mtgp32.h
54
+ *
55
+ * @brief Mersenne Twister for Graphic Processors (mtgp32), which
56
+ * generates 32-bit unsigned integers and single precision floating
57
+ * point numbers based on IEEE 754 format.
58
+ *
59
+ * @author Mutsuo Saito (Hiroshima University)
60
+ * @author Makoto Matsumoto (Hiroshima University)
61
+ *
62
+ */
63
+ /*
64
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
65
+ * University. All rights reserved.
66
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
67
+ * University and University of Tokyo. All rights reserved.
68
+ *
69
+ * Redistribution and use in source and binary forms, with or without
70
+ * modification, are permitted provided that the following conditions are
71
+ * met:
72
+ *
73
+ * * Redistributions of source code must retain the above copyright
74
+ * notice, this list of conditions and the following disclaimer.
75
+ * * Redistributions in binary form must reproduce the above
76
+ * copyright notice, this list of conditions and the following
77
+ * disclaimer in the documentation and/or other materials provided
78
+ * with the distribution.
79
+ * * Neither the name of the Hiroshima University nor the names of
80
+ * its contributors may be used to endorse or promote products
81
+ * derived from this software without specific prior written
82
+ * permission.
83
+ *
84
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
85
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
86
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
87
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
88
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
89
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
90
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
91
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
92
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
93
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
94
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95
+ */
96
+
97
+
98
+ #define MTGPDC_MEXP 11213
99
+ #define MTGPDC_N 351
100
+ #define MTGPDC_FLOOR_2P 256
101
+ #define MTGPDC_CEIL_2P 512
102
+ #define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213
103
+ #define MTGP32_STATE_SIZE 1024
104
+ #define MTGP32_STATE_MASK 1023
105
+ #define CURAND_NUM_MTGP32_PARAMS 200
106
+ #define MEXP 11213
107
+ #define THREAD_NUM MTGPDC_FLOOR_2P
108
+ #define LARGE_SIZE (THREAD_NUM * 3)
109
+ #define TBL_SIZE 16
110
+
111
+ /**
112
+ * \addtogroup DEVICE Device API
113
+ *
114
+ * @{
115
+ */
116
+
117
+ /*
118
+ * \struct MTGP32_PARAMS_FAST_T
119
+ * MTGP32 parameters.
120
+ * Some elements are redundant to keep the structure simple.
121
+ *
122
+ * \b pos is a pick up position which is selected to have good
123
+ * performance on graphic processors. 3 < \b pos < Q, where Q is a
124
+ * maximum number such that the size of status array - Q is a power of
125
+ * 2. For example, when \b mexp is 44497, size of 32-bit status array
126
+ * is 696, and Q is 184, then \b pos is between 4 and 183. This means
127
+ * 512 parallel calculations are allowed when \b mexp is 44497.
128
+ *
129
+ * \b poly_sha1 is SHA1 digest of the characteristic polynomial of
130
+ * state transition function. SHA1 is calculated based on printing
131
+ * form of the polynomial. This is important when we use parameters
132
+ * generated by the dynamic creator.
133
+ *
134
+ * \b mask This is a mask to make the dimension of state space have
135
+ * just Mersenne Prime. This is redundant.
136
+ */
137
+
138
+ struct mtgp32_params_fast;
139
+
140
+ struct mtgp32_params_fast {
141
+ int mexp; /*< Mersenne exponent. This is redundant. */
142
+ int pos; /*< pick up position. */
143
+ int sh1; /*< shift value 1. 0 < sh1 < 32. */
144
+ int sh2; /*< shift value 2. 0 < sh2 < 32. */
145
+ unsigned int tbl[16]; /*< a small matrix. */
146
+ unsigned int tmp_tbl[16]; /*< a small matrix for tempering. */
147
+ unsigned int flt_tmp_tbl[16]; /*< a small matrix for tempering and
148
+ converting to float. */
149
+ unsigned int mask; /*< This is a mask for state space */
150
+ unsigned char poly_sha1[21]; /*< SHA1 digest */
151
+ };
152
+
153
+ /** \cond UNHIDE_TYPEDEFS */
154
+ typedef struct mtgp32_params_fast mtgp32_params_fast_t;
155
+ /** \endcond */
156
+
157
+ /*
158
+ * Generator Parameters.
159
+ */
160
+ struct mtgp32_kernel_params;
161
+ struct mtgp32_kernel_params {
162
+ unsigned int pos_tbl[CURAND_NUM_MTGP32_PARAMS];
163
+ unsigned int param_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE];
164
+ unsigned int temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE];
165
+ unsigned int single_temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE];
166
+ unsigned int sh1_tbl[CURAND_NUM_MTGP32_PARAMS];
167
+ unsigned int sh2_tbl[CURAND_NUM_MTGP32_PARAMS];
168
+ unsigned int mask[1];
169
+ };
170
+
171
+ /** \cond UNHIDE_TYPEDEFS */
172
+ typedef struct mtgp32_kernel_params mtgp32_kernel_params_t;
173
+ /** \endcond */
174
+
175
+
176
+
177
+ /*
178
+ * kernel I/O
179
+ * This structure must be initialized before first use.
180
+ */
181
+
182
+ /* MTGP (Mersenne Twister) RNG */
183
+ /* This generator uses the Mersenne Twister algorithm of
184
+ * http://arxiv.org/abs/1005.4973v2
185
+ * Has period 2^11213.
186
+ */
187
+
188
+ /**
189
+ * CURAND MTGP32 state
190
+ */
191
+ struct curandStateMtgp32;
192
+
193
+ struct curandStateMtgp32 {
194
+ unsigned int s[MTGP32_STATE_SIZE];
195
+ int offset;
196
+ int pIdx;
197
+ mtgp32_kernel_params_t * k;
198
+ };
199
+
200
+ /*
201
+ * CURAND MTGP32 state
202
+ */
203
+ /** \cond UNHIDE_TYPEDEFS */
204
+ typedef struct curandStateMtgp32 curandStateMtgp32_t;
205
+ /** \endcond */
206
+
207
+ /** @} */
208
+
209
+ #endif
210
+
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_host.h ADDED
@@ -0,0 +1,516 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * curand_mtgp32_host.h
52
+ *
53
+ *
54
+ * MTGP32-11213
55
+ *
56
+ * Mersenne Twister RNG for the GPU
57
+ *
58
+ * The period of generated integers is 2<sup>11213</sup>-1.
59
+ *
60
+ * This code generates 32-bit unsigned integers, and
61
+ * single precision floating point numbers uniformly distributed
62
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
63
+ */
64
+
65
+ /*
66
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
67
+ * University. All rights reserved.
68
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
69
+ * University and University of Tokyo. All rights reserved.
70
+ *
71
+ * Redistribution and use in source and binary forms, with or without
72
+ * modification, are permitted provided that the following conditions are
73
+ * met:
74
+ *
75
+ * * Redistributions of source code must retain the above copyright
76
+ * notice, this list of conditions and the following disclaimer.
77
+ * * Redistributions in binary form must reproduce the above
78
+ * copyright notice, this list of conditions and the following
79
+ * disclaimer in the documentation and/or other materials provided
80
+ * with the distribution.
81
+ * * Neither the name of the Hiroshima University nor the names of
82
+ * its contributors may be used to endorse or promote products
83
+ * derived from this software without specific prior written
84
+ * permission.
85
+ *
86
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
87
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
88
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
89
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
90
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
91
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
92
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97
+ */
98
+ #if !defined CURAND_MTGP32_HOST_H
99
+ #define CURAND_MTGP32_HOST_H
100
+
101
+ #if !defined(QUALIFIERS)
102
+ #define QUALIFIERS static inline __device__
103
+ #endif
104
+
105
+ #include <cuda_runtime.h>
106
+ #include <stdlib.h>
107
+ #include <memory.h>
108
+ #include <string.h>
109
+ #include "curand.h"
110
+ #include "curand_mtgp32.h"
111
+ #include "curand_mtgp32dc_p_11213.h"
112
+
113
+
114
+ /**
115
+ * \addtogroup DEVICE Device API
116
+ *
117
+ * @{
118
+ */
119
+
120
+ static const unsigned int non_zero = 0x4d544750;
121
+
122
+ /*
123
+ * This function represents a function used in the initialization
124
+ * by mtgp32_init_by_array() and mtgp32_init_by_str().
125
+ * @param[in] x 32-bit integer
126
+ * @return 32-bit integer
127
+ */
128
+ static __forceinline__ unsigned int ini_func1(unsigned int x) {
129
+ return (x ^ (x >> 27)) * (1664525);
130
+ }
131
+
132
+ /*
133
+ * This function represents a function used in the initialization
134
+ * by mtgp32_init_by_array() and mtgp32_init_by_str().
135
+ * @param[in] x 32-bit integer
136
+ * @return 32-bit integer
137
+ */
138
+ static __forceinline__ unsigned int ini_func2(unsigned int x) {
139
+ return (x ^ (x >> 27)) * (1566083941);
140
+ }
141
+
142
+ /*
143
+ * This function initializes the internal state array with a 32-bit
144
+ * integer seed. The allocated memory should be freed by calling
145
+ * mtgp32_free(). \b para should be one of the elements in the
146
+ * parameter table (mtgp32-param-ref.c).
147
+ *
148
+ * This function is called by the CUDA program, because the CUDA program uses
149
+ * another structure and another allocation method.
150
+ *
151
+ * @param[out] array MTGP internal status vector.
152
+ * @param[in] para parameter structure
153
+ * @param[in] seed a 32-bit integer used as the seed.
154
+ */
155
+ static __forceinline__ __host__
156
+ void mtgp32_init_state(unsigned int state[],
157
+ const mtgp32_params_fast_t *para, unsigned int seed) {
158
+ int i;
159
+ int size = para->mexp / 32 + 1;
160
+ unsigned int hidden_seed;
161
+ unsigned int tmp;
162
+ hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
163
+ tmp = hidden_seed;
164
+ tmp += tmp >> 16;
165
+ tmp += tmp >> 8;
166
+ memset(state, tmp & 0xff, sizeof(unsigned int) * size);
167
+ state[0] = seed;
168
+ state[1] = hidden_seed;
169
+ for (i = 1; i < size; i++) {
170
+ state[i] ^= (1812433253) * (state[i - 1] ^ (state[i - 1] >> 30)) + i;
171
+ }
172
+ }
173
+
174
+ /*
175
+ * This function initializes the internal state array
176
+ * with a 32-bit integer array. \b para should be one of the elements in
177
+ * the parameter table (mtgp32-param-ref.c).
178
+ *
179
+ * @param[out] state MTGP internal state vector.
180
+ * @param[in] para parameter structure
181
+ * @param[in] array a 32-bit integer array used as a seed.
182
+ * @param[in] length length of the array.
183
+ * @return CURAND_STATUS_SUCCESS
184
+ */
185
+ static __forceinline__ __host__
186
+ int mtgp32_init_by_array(unsigned int state[],
187
+ const mtgp32_params_fast_t *para,
188
+ unsigned int *array, int length) {
189
+ int i, j, count;
190
+ unsigned int r;
191
+ int lag;
192
+ int mid;
193
+ int size = para->mexp / 32 + 1;
194
+ unsigned int hidden_seed;
195
+ unsigned int tmp;
196
+
197
+ if (size >= 623) {
198
+ lag = 11;
199
+ } else if (size >= 68) {
200
+ lag = 7;
201
+ } else if (size >= 39) {
202
+ lag = 5;
203
+ } else {
204
+ lag = 3;
205
+ }
206
+ mid = (size - lag) / 2;
207
+
208
+ hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
209
+ tmp = hidden_seed;
210
+ tmp += tmp >> 16;
211
+ tmp += tmp >> 8;
212
+ memset(state, tmp & 0xff, sizeof(unsigned int) * size);
213
+ state[0] = hidden_seed;
214
+
215
+ if (length + 1 > size) {
216
+ count = length + 1;
217
+ } else {
218
+ count = size;
219
+ }
220
+ r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
221
+ state[mid] += r;
222
+ r += length;
223
+ state[(mid + lag) % size] += r;
224
+ state[0] = r;
225
+ i = 1;
226
+ count--;
227
+ for (i = 1, j = 0; (j < count) && (j < length); j++) {
228
+ r = ini_func1(state[i] ^ state[(i + mid) % size]
229
+ ^ state[(i + size - 1) % size]);
230
+ state[(i + mid) % size] += r;
231
+ r += array[j] + i;
232
+ state[(i + mid + lag) % size] += r;
233
+ state[i] = r;
234
+ i = (i + 1) % size;
235
+ }
236
+ for (; j < count; j++) {
237
+ r = ini_func1(state[i] ^ state[(i + mid) % size]
238
+ ^ state[(i + size - 1) % size]);
239
+ state[(i + mid) % size] += r;
240
+ r += i;
241
+ state[(i + mid + lag) % size] += r;
242
+ state[i] = r;
243
+ i = (i + 1) % size;
244
+ }
245
+ for (j = 0; j < size; j++) {
246
+ r = ini_func2(state[i] + state[(i + mid) % size]
247
+ + state[(i + size - 1) % size]);
248
+ state[(i + mid) % size] ^= r;
249
+ r -= i;
250
+ state[(i + mid + lag) % size] ^= r;
251
+ state[i] = r;
252
+ i = (i + 1) % size;
253
+ }
254
+ if (state[size - 1] == 0) {
255
+ state[size - 1] = non_zero;
256
+ }
257
+ return 0;
258
+ }
259
+
260
+ /*
261
+ * This function initializes the internal state array
262
+ * with a character array. \b para should be one of the elements in
263
+ * the parameter table (mtgp32-param-ref.c).
264
+ * This is the same algorithm as mtgp32_init_by_array(), but is hoped to
265
+ * be more useful.
266
+ *
267
+ * @param[out] state MTGP internal state vector.
268
+ * @param[in] para parameter structure
269
+ * @param[in] array a character array used as a seed. (terminated by zero.)
270
+ * @return memory allocation result. if 0 then O.K.
271
+ */
272
+ static __forceinline__ __host__
273
+ int mtgp32_init_by_str(unsigned int state[],
274
+ const mtgp32_params_fast_t *para, unsigned char *array) {
275
+ int i, j, count;
276
+ unsigned int r;
277
+ int lag;
278
+ int mid;
279
+ int size = para->mexp / 32 + 1;
280
+ int length = (unsigned int)strlen((char *)array);
281
+ unsigned int hidden_seed;
282
+ unsigned int tmp;
283
+
284
+ if (size >= 623) {
285
+ lag = 11;
286
+ } else if (size >= 68) {
287
+ lag = 7;
288
+ } else if (size >= 39) {
289
+ lag = 5;
290
+ } else {
291
+ lag = 3;
292
+ }
293
+ mid = (size - lag) / 2;
294
+
295
+ hidden_seed = para->tbl[4] ^ (para->tbl[8] << 16);
296
+ tmp = hidden_seed;
297
+ tmp += tmp >> 16;
298
+ tmp += tmp >> 8;
299
+ memset(state, tmp & 0xff, sizeof(unsigned int) * size);
300
+ state[0] = hidden_seed;
301
+
302
+ if (length + 1 > size) {
303
+ count = length + 1;
304
+ } else {
305
+ count = size;
306
+ }
307
+ r = ini_func1(state[0] ^ state[mid] ^ state[size - 1]);
308
+ state[mid] += r;
309
+ r += length;
310
+ state[(mid + lag) % size] += r;
311
+ state[0] = r;
312
+ i = 1;
313
+ count--;
314
+ for (i = 1, j = 0; (j < count) && (j < length); j++) {
315
+ r = ini_func1(state[i] ^ state[(i + mid) % size]
316
+ ^ state[(i + size - 1) % size]);
317
+ state[(i + mid) % size] += r;
318
+ r += array[j] + i;
319
+ state[(i + mid + lag) % size] += r;
320
+ state[i] = r;
321
+ i = (i + 1) % size;
322
+ }
323
+ for (; j < count; j++) {
324
+ r = ini_func1(state[i] ^ state[(i + mid) % size]
325
+ ^ state[(i + size - 1) % size]);
326
+ state[(i + mid) % size] += r;
327
+ r += i;
328
+ state[(i + mid + lag) % size] += r;
329
+ state[i] = r;
330
+ i = (i + 1) % size;
331
+ }
332
+ for (j = 0; j < size; j++) {
333
+ r = ini_func2(state[i] + state[(i + mid) % size]
334
+ + state[(i + size - 1) % size]);
335
+ state[(i + mid) % size] ^= r;
336
+ r -= i;
337
+ state[(i + mid + lag) % size] ^= r;
338
+ state[i] = r;
339
+ i = (i + 1) % size;
340
+ }
341
+ if (state[size - 1] == 0) {
342
+ state[size - 1] = non_zero;
343
+ }
344
+ return 0;
345
+ }
346
+
347
+ template<typename ParamsType>
348
+ static __forceinline__ __host__
349
+ curandStatus_t curandMakeMTGP32ConstantsImpl(const mtgp32_params_fast_t params[], ParamsType * p, const int block_num)
350
+ {
351
+ const int size1 = sizeof(unsigned int) * block_num;
352
+ const int size2 = sizeof(unsigned int) * block_num * TBL_SIZE;
353
+ unsigned int *h_pos_tbl;
354
+ unsigned int *h_sh1_tbl;
355
+ unsigned int *h_sh2_tbl;
356
+ unsigned int *h_param_tbl;
357
+ unsigned int *h_temper_tbl;
358
+ unsigned int *h_single_temper_tbl;
359
+ unsigned int *h_mask;
360
+ curandStatus_t status = CURAND_STATUS_SUCCESS;
361
+
362
+ h_pos_tbl = (unsigned int *)malloc(size1);
363
+ h_sh1_tbl = (unsigned int *)malloc(size1);
364
+ h_sh2_tbl = (unsigned int *)malloc(size1);
365
+ h_param_tbl = (unsigned int *)malloc(size2);
366
+ h_temper_tbl = (unsigned int *)malloc(size2);
367
+ h_single_temper_tbl = (unsigned int *)malloc(size2);
368
+ h_mask = (unsigned int *)malloc(sizeof(unsigned int));
369
+ if (h_pos_tbl == NULL
370
+ || h_sh1_tbl == NULL
371
+ || h_sh2_tbl == NULL
372
+ || h_param_tbl == NULL
373
+ || h_temper_tbl == NULL
374
+ || h_single_temper_tbl == NULL
375
+ || h_mask == NULL) {
376
+ if (h_pos_tbl != NULL) free(h_pos_tbl);
377
+ if (h_sh1_tbl != NULL) free(h_sh1_tbl);
378
+ if (h_sh2_tbl != NULL) free(h_sh2_tbl);
379
+ if (h_param_tbl != NULL) free(h_param_tbl);
380
+ if (h_temper_tbl != NULL) free(h_temper_tbl);
381
+ if (h_single_temper_tbl != NULL) free(h_single_temper_tbl);
382
+ if (h_mask != NULL) free(h_mask);
383
+ status = CURAND_STATUS_ALLOCATION_FAILED;
384
+ } else {
385
+
386
+ h_mask[0] = params[0].mask;
387
+ for (int i = 0; i < block_num; i++) {
388
+ h_pos_tbl[i] = params[i].pos;
389
+ h_sh1_tbl[i] = params[i].sh1;
390
+ h_sh2_tbl[i] = params[i].sh2;
391
+ for (int j = 0; j < TBL_SIZE; j++) {
392
+ h_param_tbl[i * TBL_SIZE + j] = params[i].tbl[j];
393
+ h_temper_tbl[i * TBL_SIZE + j] = params[i].tmp_tbl[j];
394
+ h_single_temper_tbl[i * TBL_SIZE + j] = params[i].flt_tmp_tbl[j];
395
+ }
396
+ }
397
+ if (cudaMemcpy( p->pos_tbl,
398
+ h_pos_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
399
+ {
400
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
401
+ } else
402
+ if (cudaMemcpy( p->sh1_tbl,
403
+ h_sh1_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
404
+ {
405
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
406
+ } else
407
+ if (cudaMemcpy( p->sh2_tbl,
408
+ h_sh2_tbl, size1, cudaMemcpyHostToDevice) != cudaSuccess)
409
+ {
410
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
411
+ } else
412
+ if (cudaMemcpy( p->param_tbl,
413
+ h_param_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
414
+ {
415
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
416
+ } else
417
+ if (cudaMemcpy( p->temper_tbl,
418
+ h_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
419
+ {
420
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
421
+ } else
422
+ if (cudaMemcpy( p->single_temper_tbl,
423
+ h_single_temper_tbl, size2, cudaMemcpyHostToDevice) != cudaSuccess)
424
+ {
425
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
426
+ } else
427
+ if (cudaMemcpy( p->mask,
428
+ h_mask, sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess)
429
+ {
430
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
431
+ }
432
+ }
433
+ if (h_pos_tbl != NULL) free(h_pos_tbl);
434
+ if (h_sh1_tbl != NULL) free(h_sh1_tbl);
435
+ if (h_sh2_tbl != NULL) free(h_sh2_tbl);
436
+ if (h_param_tbl != NULL) free(h_param_tbl);
437
+ if (h_temper_tbl != NULL) free(h_temper_tbl);
438
+ if (h_single_temper_tbl != NULL)free(h_single_temper_tbl);
439
+ if (h_mask != NULL) free(h_mask);
440
+ return status;
441
+ }
442
+
443
+ /**
444
+ * \brief Set up constant parameters for the mtgp32 generator
445
+ *
446
+ * This host-side helper function re-organizes CURAND_NUM_MTGP32_PARAMS sets of
447
+ * generator parameters for use by kernel functions and copies the
448
+ * result to the specified location in device memory.
449
+ *
450
+ * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
451
+ * \param p - pointer to a structure of type mtgp32_kernel_params_t in device memory.
452
+ *
453
+ * \return
454
+ * - CURAND_STATUS_ALLOCATION_FAILED if host memory could not be allocated
455
+ * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
456
+ * - CURAND_STATUS_SUCCESS otherwise
457
+ */
458
+ static __forceinline__ __host__
459
+ curandStatus_t curandMakeMTGP32Constants(const mtgp32_params_fast_t params[], mtgp32_kernel_params_t * p)
460
+ {
461
+ return curandMakeMTGP32ConstantsImpl(params, p, CURAND_NUM_MTGP32_PARAMS);
462
+ }
463
+
464
+ /**
465
+ * \brief Set up initial states for the mtgp32 generator
466
+ *
467
+ * This host-side helper function initializes a number of states (one parameter set per state) for
468
+ * an mtgp32 generator. To accomplish this it allocates a state array in host memory,
469
+ * initializes that array, and copies the result to device memory.
470
+ *
471
+ * \param s - pointer to an array of states in device memory
472
+ * \param params - Pointer to an array of type mtgp32_params_fast_t in host memory
473
+ * \param k - pointer to a structure of type mtgp32_kernel_params_t in device memory
474
+ * \param n - number of parameter sets/states to initialize
475
+ * \param seed - seed value
476
+ *
477
+ * \return
478
+ * - CURAND_STATUS_ALLOCATION_FAILED if host memory state could not be allocated
479
+ * - CURAND_STATUS_INITIALIZATION_FAILED if the copy to device memory failed
480
+ * - CURAND_STATUS_SUCCESS otherwise
481
+ */
482
+ static __forceinline__ __host__
483
+ curandStatus_t CURANDAPI curandMakeMTGP32KernelState(curandStateMtgp32_t *s,
484
+ mtgp32_params_fast_t params[],
485
+ mtgp32_kernel_params_t *k,
486
+ int n,
487
+ unsigned long long seed)
488
+ {
489
+ int i;
490
+ curandStatus_t status = CURAND_STATUS_SUCCESS;
491
+ curandStateMtgp32_t *h_status =(curandStateMtgp32_t *) malloc(sizeof(curandStateMtgp32_t) * n);
492
+ if (h_status == NULL) {
493
+ status = CURAND_STATUS_ALLOCATION_FAILED;
494
+ } else {
495
+ seed = seed ^ (seed >> 32);
496
+ for (i = 0; i < n; i++) {
497
+ mtgp32_init_state(&(h_status[i].s[0]), &params[i],(unsigned int)seed + i + 1);
498
+ h_status[i].offset = 0;
499
+ h_status[i].pIdx = i;
500
+ h_status[i].k = k;
501
+ }
502
+ if (cudaMemcpy(s, h_status,
503
+ sizeof(curandStateMtgp32_t) * n,
504
+ cudaMemcpyHostToDevice) != cudaSuccess) {
505
+ status = CURAND_STATUS_INITIALIZATION_FAILED;
506
+ }
507
+ }
508
+ free(h_status);
509
+ return status;
510
+ }
511
+
512
+ /** @} */
513
+
514
+ #endif
515
+
516
+
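Putting the two helpers above together, a typical host-side setup looks roughly like the sketch below (an editor's illustration; the function name and the choice of 64 generators are arbitrary). It relies on the pre-generated parameter table mtgp32dc_params_fast_11213 pulled in through curand_mtgp32dc_p_11213.h.

curandStatus_t setup_mtgp32(curandStateMtgp32_t **d_states,
                            mtgp32_kernel_params_t **d_params,
                            unsigned long long seed)
{
    const int n = 64;                  /* must not exceed CURAND_NUM_MTGP32_PARAMS */
    if (cudaMalloc((void **)d_params, sizeof(mtgp32_kernel_params_t)) != cudaSuccess ||
        cudaMalloc((void **)d_states, n * sizeof(curandStateMtgp32_t)) != cudaSuccess)
        return CURAND_STATUS_ALLOCATION_FAILED;
    curandStatus_t st = curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, *d_params);
    if (st != CURAND_STATUS_SUCCESS)
        return st;
    return curandMakeMTGP32KernelState(*d_states, mtgp32dc_params_fast_11213,
                                       *d_params, n, seed);
}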
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32_kernel.h ADDED
@@ -0,0 +1,386 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * curand_mtgp32_kernel.h
52
+ *
53
+ *
54
+ * MTGP32-11213
55
+ *
56
+ * Mersenne Twister RNG for the GPU
57
+ *
58
+ * The period of generated integers is 2<sup>11213</sup>-1.
59
+ *
60
+ * This code generates 32-bit unsigned integers, and
61
+ * single precision floating point numbers uniformly distributed
62
+ * in the range [1, 2). (float r; 1.0 <= r < 2.0)
63
+ */
64
+
65
+ /*
66
+ * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
67
+ * University. All rights reserved.
68
+ * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
69
+ * University and University of Tokyo. All rights reserved.
70
+ *
71
+ * Redistribution and use in source and binary forms, with or without
72
+ * modification, are permitted provided that the following conditions are
73
+ * met:
74
+ *
75
+ * * Redistributions of source code must retain the above copyright
76
+ * notice, this list of conditions and the following disclaimer.
77
+ * * Redistributions in binary form must reproduce the above
78
+ * copyright notice, this list of conditions and the following
79
+ * disclaimer in the documentation and/or other materials provided
80
+ * with the distribution.
81
+ * * Neither the name of the Hiroshima University nor the names of
82
+ * its contributors may be used to endorse or promote products
83
+ * derived from this software without specific prior written
84
+ * permission.
85
+ *
86
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
87
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
88
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
89
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
90
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
91
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
92
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97
+ */
98
+ #if !defined CURAND_MTGP32_KERNEL_H
99
+ #define CURAND_MTGP32_KERNEL_H
100
+
101
+ #if !defined(QUALIFIERS)
102
+ #define QUALIFIERS static __forceinline__ __device__
103
+ #endif
104
+
105
+ #ifndef __CUDACC_RTC__
106
+ #include <cuda_runtime.h>
107
+ #include <stdlib.h>
108
+ #include <memory.h>
109
+ #include <string.h>
110
+ #endif // ifndef __CUDACC_RTC__
111
+ #include <nv/target>
112
+ #include "curand.h"
113
+ #include "curand_mtgp32.h"
114
+
115
+ /**
116
+ * \addtogroup DEVICE Device API
117
+ *
118
+ * @{
119
+ */
120
+
121
+ #ifndef __CUDA_ARCH__
122
+ // define blockDim and threadIdx for host compatibility call
123
+ extern const dim3 blockDim;
124
+ extern const uint3 threadIdx;
125
+ #endif
126
+
127
+
128
+ /*
129
+ * The function of the recursion formula calculation.
130
+ *
131
+ * @param[in] X1 the farthest part of state array.
132
+ * @param[in] X2 the second farthest part of state array.
133
+ * @param[in] Y a part of state array.
134
+ * @param[in] bid block id.
135
+ * @return output
136
+ */
137
+ QUALIFIERS unsigned int para_rec(mtgp32_kernel_params_t * k,unsigned int X1, unsigned int X2, unsigned int Y, int bid) {
138
+ unsigned int X = (X1 & k->mask[0]) ^ X2;
139
+ unsigned int MAT;
140
+
141
+ X ^= X << k->sh1_tbl[bid];
142
+ Y = X ^ (Y >> k->sh2_tbl[bid]);
143
+ MAT = k->param_tbl[bid][Y & 0x0f];
144
+ return Y ^ MAT;
145
+ }
146
+
147
+ /*
148
+ * The tempering function.
149
+ *
150
+ * @param[in] V the output value should be tempered.
151
+ * @param[in] T the tempering helper value.
152
+ * @param[in] bid block id.
153
+ * @return the tempered value.
154
+ */
155
+ QUALIFIERS unsigned int temper(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
156
+ unsigned int MAT;
157
+
158
+ T ^= T >> 16;
159
+ T ^= T >> 8;
160
+ MAT = k->temper_tbl[bid][T & 0x0f];
161
+ return V ^ MAT;
162
+ }
163
+
164
+ /*
165
+ * The tempering and converting function.
166
+ * By using the preset table, converting to IEEE format
167
+ * and tempering are done simultaneously.
168
+ *
169
+ * @param[in] V the output value should be tempered.
170
+ * @param[in] T the tempering helper value.
171
+ * @param[in] bid block id.
172
+ * @return the tempered and converted value.
173
+ */
174
+ QUALIFIERS unsigned int temper_single(mtgp32_kernel_params_t * k,unsigned int V, unsigned int T, int bid) {
175
+ unsigned int MAT;
176
+ unsigned int r;
177
+
178
+ T ^= T >> 16;
179
+ T ^= T >> 8;
180
+ MAT = k->single_temper_tbl[bid][T & 0x0f];
181
+ r = (V >> 9) ^ MAT;
182
+ return r;
183
+ }
184
+
185
+ /**
186
+ * \brief Return 32-bits of pseudorandomness from a mtgp32 generator.
187
+ *
188
+ * Return 32-bits of pseudorandomness from the mtgp32 generator in \p state,
189
+ * increment position of generator by the number of threads in the block.
190
+ * Note the number of threads in the block can not exceed 256.
191
+ *
192
+ * \param state - Pointer to state to update
193
+ *
194
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
195
+ */
196
+ QUALIFIERS unsigned int curand(curandStateMtgp32_t *state)
197
+ {
198
+ unsigned int t;
199
+ unsigned int d;
200
+ int pos = state->k->pos_tbl[state->pIdx];
201
+ unsigned int r;
202
+ unsigned int o;
203
+
204
+ d = blockDim.z * blockDim.y * blockDim.x;
205
+ //assert( d <= 256 );
206
+ t = (blockDim.z * blockDim.y * threadIdx.z) + (blockDim.x * threadIdx.y) + threadIdx.x;
207
+ r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
208
+ state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
209
+ state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
210
+ state->pIdx);
211
+
212
+ state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
213
+ o = temper(state->k, r,
214
+ state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
215
+ state->pIdx);
216
+ NV_IF_TARGET(NV_IS_DEVICE,
217
+ __syncthreads();
218
+ )
219
+ if (t == 0)
220
+ {
221
+ state->offset = (state->offset + d) & MTGP32_STATE_MASK;
222
+ }
223
+ NV_IF_TARGET(NV_IS_DEVICE,
224
+ __syncthreads();
225
+ )
226
+ return o;
227
+
228
+ }
229
+ /**
230
+ * \brief Return 32-bits of pseudorandomness from a specific position in a mtgp32 generator.
231
+ *
232
+ * Return 32-bits of pseudorandomness from position \p index of the mtgp32 generator in \p state,
233
+ * increment position of generator by \p n positions, which must be the total number of positions
234
+ * updated in the state by the thread block, for this invocation.
235
+ *
236
+ * Note :
237
+ * Thread indices must range from 0...\p n - 1.
238
+ * The number of positions updated may not exceed 256.
239
+ * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
240
+ *
241
+ * \param state - Pointer to state to update
242
+ * \param index - Index (0..255) of the position within the state to draw from and update
243
+ * \param n - The total number of positions in this state that are being updated by this invocation
244
+ *
245
+ * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use.
246
+ */
247
+ QUALIFIERS unsigned int curand_mtgp32_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
248
+ {
249
+ unsigned int t;
250
+ int pos = state->k->pos_tbl[state->pIdx];
251
+ unsigned int r;
252
+ unsigned int o;
253
+
254
+ t = index;
255
+ r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
256
+ state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
257
+ state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
258
+ state->pIdx);
259
+
260
+ state->s[(t + state->offset + MTGPDC_N) & MTGP32_STATE_MASK] = r;
261
+ o = temper(state->k, r,
262
+ state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
263
+ state->pIdx);
264
+ NV_IF_TARGET(NV_IS_DEVICE,
265
+ __syncthreads();
266
+ )
267
+ if (index == 0)
268
+ {
269
+ state->offset = (state->offset + n) & MTGP32_STATE_MASK;
270
+ }
271
+ NV_IF_TARGET(NV_IS_DEVICE,
272
+ __syncthreads();
273
+ )
274
+ return o;
275
+ }
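A hypothetical kernel built on the *_specific interface just documented: each block of 128 threads owns one state (one block per state, thread indices 0..n-1) and advances it by exactly blockDim.x positions per call.

__global__ void draw_specific(curandStateMtgp32_t *states, unsigned int *out,
                              int per_thread)        /* launch with 128 threads per block */
{
    curandStateMtgp32_t *s = &states[blockIdx.x];
    unsigned char idx = (unsigned char)threadIdx.x;   /* 0 .. blockDim.x-1 */
    unsigned char n   = (unsigned char)blockDim.x;    /* 128 here; the API limit is 256 */
    size_t base = ((size_t)blockIdx.x * blockDim.x + threadIdx.x) * per_thread;
    for (int i = 0; i < per_thread; ++i) {
        /* every thread of the block must reach this call together */
        out[base + i] = curand_mtgp32_specific(s, idx, n);
    }
}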
276
+ /**
277
+ * \brief Return a uniformly distributed float from a mtgp32 generator.
278
+ *
279
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
280
+ * from the mtgp32 generator in \p state, increment position of generator.
281
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
282
+ * point outputs are never returned.
283
+ *
284
+ * Note: This alternate derivation of a uniform float is provided for completeness
285
+ * with the original source
286
+ *
287
+ * \param state - Pointer to state to update
288
+ *
289
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
290
+ */
291
+ QUALIFIERS float curand_mtgp32_single(curandStateMtgp32_t *state)
292
+ {
293
+ unsigned int t;
294
+ unsigned int d;
295
+ int pos = state->k->pos_tbl[state->pIdx];
296
+ unsigned int r;
297
+ unsigned int o_u;
298
+ float o_f;
299
+
300
+
301
+ t = blockDim.z * blockDim.y;
302
+ d = t * blockDim.x;
303
+ //assert( d <= 256 );
304
+ t += threadIdx.x;
305
+ r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
306
+ state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
307
+ state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
308
+ state->pIdx);
309
+
310
+ state->s[t] = r;
311
+ o_u = temper_single(state->k, r,
312
+ state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
313
+ state->pIdx);
314
+ NV_IF_TARGET(NV_IS_DEVICE,
315
+ __syncthreads();
316
+ )
317
+ if (threadIdx.x == 0)
318
+ {
319
+ state->offset = (state->offset + d) & MTGP32_STATE_MASK;
320
+ }
321
+ NV_IF_TARGET(NV_IS_DEVICE,
322
+ __syncthreads();
323
+ )
324
+ memcpy(&o_f, &o_u, sizeof(o_u));
325
+ return o_f;
326
+ }
327
+
328
+ /**
329
+ * \brief Return a uniformly distributed float from a specific position in a mtgp32 generator.
330
+ *
331
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
332
+ * from position \p index of the mtgp32 generator in \p state, and
333
+ * increment position of generator by \p n positions, which must be the total number of positions
334
+ * updated in the state by the thread block, for this invocation.
335
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
336
+ * point outputs are never returned.
337
+ *
338
+ * Note 1:
339
+ * Thread indices must range from 0...\p n - 1.
340
+ * The number of positions updated may not exceed 256.
341
+ * A thread block may update more than one state, but a given state may not be updated by more than one thread block.
342
+ *
343
+ * Note 2: This alternate derivation of a uniform float is provided for completeness
344
+ * with the original source
345
+ *
346
+ * \param state - Pointer to state to update
347
+ * \param index - Index (0..255) of the position within the state to draw from and update
348
+ * \param n - The total number of positions in this state that are being updated by this invocation
349
+ *
350
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
351
+ */
352
+ QUALIFIERS float curand_mtgp32_single_specific(curandStateMtgp32_t *state, unsigned char index, unsigned char n)
353
+ {
354
+ unsigned int t;
355
+ int pos = state->k->pos_tbl[state->pIdx];
356
+ unsigned int r;
357
+ unsigned int o_u;
358
+ float o_f;
359
+
360
+ t = index;
361
+ r = para_rec(state->k, state->s[(t + state->offset) & MTGP32_STATE_MASK],
362
+ state->s[(t + state->offset + 1) & MTGP32_STATE_MASK],
363
+ state->s[(t + state->offset + pos) & MTGP32_STATE_MASK],
364
+ state->pIdx);
365
+
366
+ state->s[t] = r;
367
+ o_u = temper_single(state->k, r,
368
+ state->s[(t + state->offset + pos -1) & MTGP32_STATE_MASK],
369
+ state->pIdx);
370
+ NV_IF_TARGET(NV_IS_DEVICE,
371
+ __syncthreads();
372
+ )
373
+ if (threadIdx.x == 0)
374
+ {
375
+ state->offset = (state->offset + n) & MTGP32_STATE_MASK;
376
+ }
377
+ NV_IF_TARGET(NV_IS_DEVICE,
378
+ __syncthreads();
379
+ )
380
+ memcpy(&o_f, &o_u, sizeof(o_u));
381
+ return o_f;
382
+ }
383
+
384
+ /** @} */
385
+
386
+ #endif
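End to end, a kernel that consumes states prepared by curandMakeMTGP32Constants / curandMakeMTGP32KernelState (see curand_mtgp32_host.h) can be as simple as the hedged sketch below; the names and per-thread sample count are hypothetical.

__global__ void sample_mtgp32(curandStateMtgp32_t *states, unsigned int *out,
                              int samples_per_thread)
{
    curandStateMtgp32_t *s = &states[blockIdx.x];   /* one state per block, shared by its threads */
    size_t base = ((size_t)blockIdx.x * blockDim.x + threadIdx.x) * samples_per_thread;
    for (int i = 0; i < samples_per_thread; ++i) {
        /* curand() synchronizes the block and advances the shared state by blockDim.x */
        out[base + i] = curand(s);
    }
}

A matching launch would be sample_mtgp32<<<n_states, 256>>>(d_states, d_out, k), with n_states no larger than the number of states that were initialized (and at most CURAND_NUM_MTGP32_PARAMS).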
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_normal.h ADDED
@@ -0,0 +1,840 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_NORMAL_H_)
52
+ #define CURAND_NORMAL_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+ #include <nv/target>
64
+
65
+ #include "curand_mrg32k3a.h"
66
+ #include "curand_mtgp32_kernel.h"
67
+ #include "curand_philox4x32_x.h"
68
+ #include "curand_normal_static.h"
69
+
70
+ QUALIFIERS float2 _curand_box_muller(unsigned int x, unsigned int y)
71
+ {
72
+ float2 result;
73
+ float u = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2);
74
+ float v = y * CURAND_2POW32_INV_2PI + (CURAND_2POW32_INV_2PI/2);
75
+ float s;
76
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
77
+ s = sqrtf(-2.0f * logf(u));
78
+ __sincosf(v, &result.x, &result.y);
79
+ ,
80
+ s = sqrtf(-2.0f * logf(u));
81
+ result.x = sinf(v);
82
+ result.y = cosf(v);
83
+ )
84
+ result.x *= s;
85
+ result.y *= s;
86
+ return result;
87
+ }
88
+
89
+ QUALIFIERS float2 curand_box_muller_mrg(curandStateMRG32k3a_t * state)
90
+ {
91
+ float x, y;
92
+ x = curand_uniform(state);
93
+ y = curand_uniform(state) * CURAND_2PI;
94
+ float2 result;
95
+ float s;
96
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
97
+ s = sqrtf(-2.0f * logf(x));
98
+ __sincosf(y, &result.x, &result.y);
99
+ ,
100
+ s = sqrtf(-2.0f * logf(x));
101
+ result.x = sinf(y);
102
+ result.y = cosf(y);
103
+ )
104
+ result.x *= s;
105
+ result.y *= s;
106
+ return result;
107
+ }
108
+
109
+ QUALIFIERS double2
110
+ _curand_box_muller_double(unsigned int x0, unsigned int x1,
111
+ unsigned int y0, unsigned int y1)
112
+ {
113
+ double2 result;
114
+ unsigned long long zx = (unsigned long long)x0 ^
115
+ ((unsigned long long)x1 << (53 - 32));
116
+ double u = zx * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
117
+ unsigned long long zy = (unsigned long long)y0 ^
118
+ ((unsigned long long)y1 << (53 - 32));
119
+ double v = zy * (CURAND_2POW53_INV_DOUBLE*2.0) + CURAND_2POW53_INV_DOUBLE;
120
+ double s = sqrt(-2.0 * log(u));
121
+
122
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
123
+ sincospi(v, &result.x, &result.y);
124
+ ,
125
+ result.x = sin(v*CURAND_PI_DOUBLE);
126
+ result.y = cos(v*CURAND_PI_DOUBLE);
127
+ )
128
+ result.x *= s;
129
+ result.y *= s;
130
+
131
+ return result;
132
+ }
133
+
134
+ QUALIFIERS double2
135
+ curand_box_muller_mrg_double(curandStateMRG32k3a_t * state)
136
+ {
137
+ double x, y;
138
+ double2 result;
139
+ x = curand_uniform_double(state);
140
+ y = curand_uniform_double(state) * 2.0;
141
+
142
+ double s = sqrt(-2.0 * log(x));
143
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
144
+ sincospi(y, &result.x, &result.y);
145
+ ,
146
+ result.x = sin(y*CURAND_PI_DOUBLE);
147
+ result.y = cos(y*CURAND_PI_DOUBLE);
148
+ )
149
+ result.x *= s;
150
+ result.y *= s;
151
+ return result;
152
+ }
153
+
154
+ template <typename R>
155
+ QUALIFIERS float2 curand_box_muller(R *state)
156
+ {
157
+ float2 result;
158
+ unsigned int x = curand(state);
159
+ unsigned int y = curand(state);
160
+ result = _curand_box_muller(x, y);
161
+ return result;
162
+ }
163
+
164
+ template <typename R>
165
+ QUALIFIERS float4 curand_box_muller4(R *state)
166
+ {
167
+ float4 result;
168
+ float2 _result;
169
+ uint4 x = curand4(state);
170
+ //unsigned int y = curand(state);
171
+ _result = _curand_box_muller(x.x, x.y);
172
+ result.x = _result.x;
173
+ result.y = _result.y;
174
+ _result = _curand_box_muller(x.z, x.w);
175
+ result.z = _result.x;
176
+ result.w = _result.y;
177
+ return result;
178
+ }
179
+
180
+ template <typename R>
181
+ QUALIFIERS double2 curand_box_muller_double(R *state)
182
+ {
183
+ double2 result;
184
+ unsigned int x0 = curand(state);
185
+ unsigned int x1 = curand(state);
186
+ unsigned int y0 = curand(state);
187
+ unsigned int y1 = curand(state);
188
+ result = _curand_box_muller_double(x0, x1, y0, y1);
189
+ return result;
190
+ }
191
+
192
+ template <typename R>
193
+ QUALIFIERS double2 curand_box_muller2_double(R *state)
194
+ {
195
+ double2 result;
196
+ uint4 _x;
197
+ _x = curand4(state);
198
+ result = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
199
+ return result;
200
+ }
201
+
202
+
203
+ template <typename R>
204
+ QUALIFIERS double4 curand_box_muller4_double(R *state)
205
+ {
206
+ double4 result;
207
+ double2 _res1;
208
+ double2 _res2;
209
+ uint4 _x;
210
+ uint4 _y;
211
+ _x = curand4(state);
212
+ _y = curand4(state);
213
+ _res1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
214
+ _res2 = _curand_box_muller_double(_y.x, _y.y, _y.z, _y.w);
215
+ result.x = _res1.x;
216
+ result.y = _res1.y;
217
+ result.z = _res2.x;
218
+ result.w = _res2.y;
219
+ return result;
220
+ }
221
+
222
+ //QUALIFIERS float _curand_normal_icdf(unsigned int x)
223
+ //{
224
+ //#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
225
+ // float s = CURAND_SQRT2;
226
+ // // Mirror to avoid loss of precision
227
+ // if(x > 0x80000000UL) {
228
+ // x = 0xffffffffUL - x;
229
+ // s = -s;
230
+ // }
231
+ // float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
232
+ // // p is in (0, 0.5], 2p is in (0, 1]
233
+ // return s * erfcinvf(2.0f * p);
234
+ //#else
235
+ // x++; //suppress warnings
236
+ // return 0.0f;
237
+ //#endif
238
+ //}
239
+ //
240
+ //QUALIFIERS float _curand_normal_icdf(unsigned long long x)
241
+ //{
242
+ //#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
243
+ // unsigned int t = (unsigned int)(x >> 32);
244
+ // float s = CURAND_SQRT2;
245
+ // // Mirror to avoid loss of precision
246
+ // if(t > 0x80000000UL) {
247
+ // t = 0xffffffffUL - t;
248
+ // s = -s;
249
+ // }
250
+ // float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
251
+ // // p is in (0, 0.5], 2p is in (0, 1]
252
+ // return s * erfcinvf(2.0f * p);
253
+ //#else
254
+ // x++;
255
+ // return 0.0f;
256
+ //#endif
257
+ //}
258
+ //
259
+ //QUALIFIERS double _curand_normal_icdf_double(unsigned int x)
260
+ //{
261
+ //#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
262
+ // double s = CURAND_SQRT2_DOUBLE;
263
+ // // Mirror to avoid loss of precision
264
+ // if(x > 0x80000000UL) {
265
+ // x = 0xffffffffUL - x;
266
+ // s = -s;
267
+ // }
268
+ // double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
269
+ // // p is in (0, 0.5], 2p is in (0, 1]
270
+ // return s * erfcinv(2.0 * p);
271
+ //#else
272
+ // x++;
273
+ // return 0.0;
274
+ //#endif
275
+ //}
276
+ //
277
+ //QUALIFIERS double _curand_normal_icdf_double(unsigned long long x)
278
+ //{
279
+ //#if __CUDA_ARCH__ > 0 || defined(HOST_HAVE_ERFCINVF)
280
+ // double s = CURAND_SQRT2_DOUBLE;
281
+ // x >>= 11;
282
+ // // Mirror to avoid loss of precision
283
+ // if(x > 0x10000000000000UL) {
284
+ // x = 0x1fffffffffffffUL - x;
285
+ // s = -s;
286
+ // }
287
+ // double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
288
+ // // p is in (0, 0.5], 2p is in (0, 1]
289
+ // return s * erfcinv(2.0 * p);
290
+ //#else
291
+ // x++;
292
+ // return 0.0;
293
+ //#endif
294
+ //}
295
+ //
296
+
297
+ /**
298
+ * \brief Return a normally distributed float from an XORWOW generator.
299
+ *
300
+ * Return a single normally distributed float with mean \p 0.0f and
301
+ * standard deviation \p 1.0f from the XORWOW generator in \p state,
302
+ * increment position of generator by one.
303
+ *
304
+ * The implementation uses a Box-Muller transform to generate two
305
+ * normally distributed results, then returns them one at a time.
306
+ * See ::curand_normal2() for a more efficient version that returns
307
+ * both results at once.
308
+ *
309
+ * \param state - Pointer to state to update
310
+ *
311
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
312
+ */
313
+ QUALIFIERS float curand_normal(curandStateXORWOW_t *state)
314
+ {
315
+ if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
316
+ unsigned int x, y;
317
+ x = curand(state);
318
+ y = curand(state);
319
+ float2 v = _curand_box_muller(x, y);
320
+ state->boxmuller_extra = v.y;
321
+ state->boxmuller_flag = EXTRA_FLAG_NORMAL;
322
+ return v.x;
323
+ }
324
+ state->boxmuller_flag = 0;
325
+ return state->boxmuller_extra;
326
+ }
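Editorial sketch (not part of the header): the comment above notes that each Box-Muller pass produces a pair, with curand_normal() handing the two values back one at a time and curand_normal2() returning both at once. A typical per-thread pattern with the XORWOW state is sketched below; the kernel and buffer names, seed handling, and launch shape are assumptions, not part of cuRAND.

#include <curand_kernel.h>

// Sketch: one XORWOW state per thread; curand_normal2() consumes the whole
// Box-Muller pair per call instead of caching half of it in the state.
__global__ void fill_normals_sketch(float *out, int n, unsigned long long seed)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = 2 * gridDim.x * blockDim.x;
    curandStateXORWOW_t state;
    curand_init(seed, tid, 0, &state);        // one subsequence per thread
    for (int i = 2 * tid; i + 1 < n; i += stride) {
        float2 v = curand_normal2(&state);    // two N(0,1) samples at once
        out[i]     = v.x;
        out[i + 1] = v.y;
    }
}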
327
+
328
+ /**
329
+ * \brief Return a normally distributed float from a Philox4_32_10 generator.
330
+ *
331
+ * Return a single normally distributed float with mean \p 0.0f and
332
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
333
+ * increment position of generator by one.
334
+ *
335
+ * The implementation uses a Box-Muller transform to generate two
336
+ * normally distributed results, then returns them one at a time.
337
+ * See ::curand_normal2() for a more efficient version that returns
338
+ * both results at once.
339
+ *
340
+ * \param state - Pointer to state to update
341
+ *
342
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
343
+ */
344
+
345
+ QUALIFIERS float curand_normal(curandStatePhilox4_32_10_t *state)
346
+ {
347
+ if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
348
+ unsigned int x, y;
349
+ x = curand(state);
350
+ y = curand(state);
351
+ float2 v = _curand_box_muller(x, y);
352
+ state->boxmuller_extra = v.y;
353
+ state->boxmuller_flag = EXTRA_FLAG_NORMAL;
354
+ return v.x;
355
+ }
356
+ state->boxmuller_flag = 0;
357
+ return state->boxmuller_extra;
358
+ }
359
+
360
+
361
+
362
+ /**
363
+ * \brief Return a normally distributed float from an MRG32k3a generator.
364
+ *
365
+ * Return a single normally distributed float with mean \p 0.0f and
366
+ * standard deviation \p 1.0f from the MRG32k3a generator in \p state,
367
+ * increment position of generator by one.
368
+ *
369
+ * The implementation uses a Box-Muller transform to generate two
370
+ * normally distributed results, then returns them one at a time.
371
+ * See ::curand_normal2() for a more efficient version that returns
372
+ * both results at once.
373
+ *
374
+ * \param state - Pointer to state to update
375
+ *
376
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
377
+ */
378
+ QUALIFIERS float curand_normal(curandStateMRG32k3a_t *state)
379
+ {
380
+ if(state->boxmuller_flag != EXTRA_FLAG_NORMAL) {
381
+ float2 v = curand_box_muller_mrg(state);
382
+ state->boxmuller_extra = v.y;
383
+ state->boxmuller_flag = EXTRA_FLAG_NORMAL;
384
+ return v.x;
385
+ }
386
+ state->boxmuller_flag = 0;
387
+ return state->boxmuller_extra;
388
+ }
389
+
390
+ /**
391
+ * \brief Return two normally distributed floats from an XORWOW generator.
392
+ *
393
+ * Return two normally distributed floats with mean \p 0.0f and
394
+ * standard deviation \p 1.0f from the XORWOW generator in \p state,
395
+ * increment position of generator by two.
396
+ *
397
+ * The implementation uses a Box-Muller transform to generate two
398
+ * normally distributed results.
399
+ *
400
+ * \param state - Pointer to state to update
401
+ *
402
+ * \return Normally distributed float2 where each element is from a
403
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
404
+ */
405
+ QUALIFIERS float2 curand_normal2(curandStateXORWOW_t *state)
406
+ {
407
+ return curand_box_muller(state);
408
+ }
409
+ /**
410
+ * \brief Return two normally distributed floats from a Philox4_32_10 generator.
411
+ *
412
+ * Return two normally distributed floats with mean \p 0.0f and
413
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
414
+ * increment position of generator by two.
415
+ *
416
+ * The implementation uses a Box-Muller transform to generate two
417
+ * normally distributed results.
418
+ *
419
+ * \param state - Pointer to state to update
420
+ *
421
+ * \return Normally distributed float2 where each element is from a
422
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
423
+ */
424
+ QUALIFIERS float2 curand_normal2(curandStatePhilox4_32_10_t *state)
425
+ {
426
+ return curand_box_muller(state);
427
+ }
428
+
429
+ /**
430
+ * \brief Return four normally distributed floats from a Philox4_32_10 generator.
431
+ *
432
+ * Return four normally distributed floats with mean \p 0.0f and
433
+ * standard deviation \p 1.0f from the Philox4_32_10 generator in \p state,
434
+ * increment position of generator by four.
435
+ *
436
+ * The implementation uses Box-Muller transforms to generate four
437
+ * normally distributed results.
438
+ *
439
+ * \param state - Pointer to state to update
440
+ *
441
+ * \return Normally distributed float4 where each element is from a
442
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
443
+ */
444
+ QUALIFIERS float4 curand_normal4(curandStatePhilox4_32_10_t *state)
445
+ {
446
+ return curand_box_muller4(state);
447
+ }
448
+
449
+
450
+
451
+ /**
452
+ * \brief Return two normally distributed floats from an MRG32k3a generator.
453
+ *
454
+ * Return two normally distributed floats with mean \p 0.0f and
455
+ * standard deviation \p 1.0f from the MRG32k3a generator in \p state,
456
+ * increment position of generator by two.
457
+ *
458
+ * The implementation uses a Box-Muller transform to generate two
459
+ * normally distributed results.
460
+ *
461
+ * \param state - Pointer to state to update
462
+ *
463
+ * \return Normally distributed float2 where each element is from a
464
+ * distribution with mean \p 0.0f and standard deviation \p 1.0f
465
+ */
466
+ QUALIFIERS float2 curand_normal2(curandStateMRG32k3a_t *state)
467
+ {
468
+ return curand_box_muller_mrg(state);
469
+ }
470
+
471
+ /**
472
+ * \brief Return a normally distributed float from a MTGP32 generator.
473
+ *
474
+ * Return a single normally distributed float with mean \p 0.0f and
475
+ * standard deviation \p 1.0f from the MTGP32 generator in \p state,
476
+ * increment position of generator.
477
+ *
478
+ * The implementation uses the inverse cumulative distribution function
479
+ * to generate normally distributed results.
480
+ *
481
+ * \param state - Pointer to state to update
482
+ *
483
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
484
+ */
485
+ QUALIFIERS float curand_normal(curandStateMtgp32_t *state)
486
+ {
487
+ return _curand_normal_icdf(curand(state));
488
+ }
489
+ /**
490
+ * \brief Return a normally distributed float from a Sobol32 generator.
491
+ *
492
+ * Return a single normally distributed float with mean \p 0.0f and
493
+ * standard deviation \p 1.0f from the Sobol32 generator in \p state,
494
+ * increment position of generator by one.
495
+ *
496
+ * The implementation uses the inverse cumulative distribution function
497
+ * to generate normally distributed results.
498
+ *
499
+ * \param state - Pointer to state to update
500
+ *
501
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
502
+ */
503
+ QUALIFIERS float curand_normal(curandStateSobol32_t *state)
504
+ {
505
+ return _curand_normal_icdf(curand(state));
506
+ }
507
+
508
+ /**
509
+ * \brief Return a normally distributed float from a scrambled Sobol32 generator.
510
+ *
511
+ * Return a single normally distributed float with mean \p 0.0f and
512
+ * standard deviation \p 1.0f from the scrambled Sobol32 generator in \p state,
513
+ * increment position of generator by one.
514
+ *
515
+ * The implementation uses the inverse cumulative distribution function
516
+ * to generate normally distributed results.
517
+ *
518
+ * \param state - Pointer to state to update
519
+ *
520
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
521
+ */
522
+ QUALIFIERS float curand_normal(curandStateScrambledSobol32_t *state)
523
+ {
524
+ return _curand_normal_icdf(curand(state));
525
+ }
526
+
527
+ /**
528
+ * \brief Return a normally distributed float from a Sobol64 generator.
529
+ *
530
+ * Return a single normally distributed float with mean \p 0.0f and
531
+ * standard deviation \p 1.0f from the Sobol64 generator in \p state,
532
+ * increment position of generator by one.
533
+ *
534
+ * The implementation uses the inverse cumulative distribution function
535
+ * to generate normally distributed results.
536
+ *
537
+ * \param state - Pointer to state to update
538
+ *
539
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
540
+ */
541
+ QUALIFIERS float curand_normal(curandStateSobol64_t *state)
542
+ {
543
+ return _curand_normal_icdf(curand(state));
544
+ }
545
+
546
+ /**
547
+ * \brief Return a normally distributed float from a scrambled Sobol64 generator.
548
+ *
549
+ * Return a single normally distributed float with mean \p 0.0f and
550
+ * standard deviation \p 1.0f from the scrambled Sobol64 generator in \p state,
551
+ * increment position of generator by one.
552
+ *
553
+ * The implementation uses the inverse cumulative distribution function
554
+ * to generate normally distributed results.
555
+ *
556
+ * \param state - Pointer to state to update
557
+ *
558
+ * \return Normally distributed float with mean \p 0.0f and standard deviation \p 1.0f
559
+ */
560
+ QUALIFIERS float curand_normal(curandStateScrambledSobol64_t *state)
561
+ {
562
+ return _curand_normal_icdf(curand(state));
563
+ }
564
+
565
+ /**
566
+ * \brief Return a normally distributed double from an XORWOW generator.
567
+ *
568
+ * Return a single normally distributed double with mean \p 0.0 and
569
+ * standard deviation \p 1.0 from the XORWOW generator in \p state,
570
+ * increment position of generator.
571
+ *
572
+ * The implementation uses a Box-Muller transform to generate two
573
+ * normally distributed results, then returns them one at a time.
574
+ * See ::curand_normal2_double() for a more efficient version that returns
575
+ * both results at once.
576
+ *
577
+ * \param state - Pointer to state to update
578
+ *
579
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
580
+ */
581
+ QUALIFIERS double curand_normal_double(curandStateXORWOW_t *state)
582
+ {
583
+ if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
584
+ unsigned int x0, x1, y0, y1;
585
+ x0 = curand(state);
586
+ x1 = curand(state);
587
+ y0 = curand(state);
588
+ y1 = curand(state);
589
+ double2 v = _curand_box_muller_double(x0, x1, y0, y1);
590
+ state->boxmuller_extra_double = v.y;
591
+ state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
592
+ return v.x;
593
+ }
594
+ state->boxmuller_flag_double = 0;
595
+ return state->boxmuller_extra_double;
596
+ }
597
+
598
+ /**
599
+ * \brief Return a normally distributed double from a Philox4_32_10 generator.
600
+ *
601
+ * Return a single normally distributed double with mean \p 0.0 and
602
+ * standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
603
+ * increment position of generator.
604
+ *
605
+ * The implementation uses a Box-Muller transform to generate two
606
+ * normally distributed results, then returns them one at a time.
607
+ * See ::curand_normal2_double() for a more efficient version that returns
608
+ * both results at once.
609
+ *
610
+ * \param state - Pointer to state to update
611
+ *
612
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
613
+ */
614
+
615
+ QUALIFIERS double curand_normal_double(curandStatePhilox4_32_10_t *state)
616
+ {
617
+ if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
618
+ uint4 _x;
619
+ _x = curand4(state);
620
+ double2 v = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
621
+ state->boxmuller_extra_double = v.y;
622
+ state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
623
+ return v.x;
624
+ }
625
+ state->boxmuller_flag_double = 0;
626
+ return state->boxmuller_extra_double;
627
+ }
628
+
629
+
630
+ /**
631
+ * \brief Return a normally distributed double from an MRG32k3a generator.
632
+ *
633
+ * Return a single normally distributed double with mean \p 0.0 and
634
+ * standard deviation \p 1.0 from the XORWOW generator in \p state,
635
+ * increment position of generator.
636
+ *
637
+ * The implementation uses a Box-Muller transform to generate two
638
+ * normally distributed results, then returns them one at a time.
639
+ * See ::curand_normal2_double() for a more efficient version that returns
640
+ * both results at once.
641
+ *
642
+ * \param state - Pointer to state to update
643
+ *
644
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
645
+ */
646
+ QUALIFIERS double curand_normal_double(curandStateMRG32k3a_t *state)
647
+ {
648
+ if(state->boxmuller_flag_double != EXTRA_FLAG_NORMAL) {
649
+ double2 v = curand_box_muller_mrg_double(state);
650
+ state->boxmuller_extra_double = v.y;
651
+ state->boxmuller_flag_double = EXTRA_FLAG_NORMAL;
652
+ return v.x;
653
+ }
654
+ state->boxmuller_flag_double = 0;
655
+ return state->boxmuller_extra_double;
656
+ }
657
+
658
+ /**
659
+ * \brief Return two normally distributed doubles from an XORWOW generator.
660
+ *
661
+ * Return two normally distributed doubles with mean \p 0.0 and
662
+ * standard deviation \p 1.0 from the XORWOW generator in \p state,
663
+ * increment position of generator by 2.
664
+ *
665
+ * The implementation uses a Box-Muller transform to generate two
666
+ * normally distributed results.
667
+ *
668
+ * \param state - Pointer to state to update
669
+ *
670
+ * \return Normally distributed double2 where each element is from a
671
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
672
+ */
673
+ QUALIFIERS double2 curand_normal2_double(curandStateXORWOW_t *state)
674
+ {
675
+ return curand_box_muller_double(state);
676
+ }
677
+
678
+ /**
679
+ * \brief Return two normally distributed doubles from a Philox4_32_10 generator.
680
+ *
681
+ * Return two normally distributed doubles with mean \p 0.0 and
682
+ * standard deviation \p 1.0 from the Philox4_32_10 generator in \p state,
683
+ * increment position of generator by 2.
684
+ *
685
+ * The implementation uses a Box-Muller transform to generate two
686
+ * normally distributed results.
687
+ *
688
+ * \param state - Pointer to state to update
689
+ *
690
+ * \return Normally distributed double2 where each element is from a
691
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
692
+ */
693
+ QUALIFIERS double2 curand_normal2_double(curandStatePhilox4_32_10_t *state)
694
+ {
695
+ uint4 _x;
696
+ double2 result;
697
+
698
+ _x = curand4(state);
699
+ double2 v1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
700
+ result.x = v1.x;
701
+ result.y = v1.y;
702
+
703
+ return result;
704
+ }
705
+
706
+ // not a part of API
707
+ QUALIFIERS double4 curand_normal4_double(curandStatePhilox4_32_10_t *state)
708
+ {
709
+ uint4 _x;
710
+ uint4 _y;
711
+ double4 result;
712
+
713
+ _x = curand4(state);
714
+ _y = curand4(state);
715
+ double2 v1 = _curand_box_muller_double(_x.x, _x.y, _x.z, _x.w);
716
+ double2 v2 = _curand_box_muller_double(_y.x, _y.y, _y.z, _y.w);
717
+ result.x = v1.x;
718
+ result.y = v1.y;
719
+ result.z = v2.x;
720
+ result.w = v2.y;
721
+
722
+ return result;
723
+ }
724
+
725
+
726
+ /**
727
+ * \brief Return two normally distributed doubles from an MRG32k3a generator.
728
+ *
729
+ * Return two normally distributed doubles with mean \p 0.0 and
730
+ * standard deviation \p 1.0 from the MRG32k3a generator in \p state,
731
+ * increment position of generator.
732
+ *
733
+ * The implementation uses a Box-Muller transform to generate two
734
+ * normally distributed results.
735
+ *
736
+ * \param state - Pointer to state to update
737
+ *
738
+ * \return Normally distributed double2 where each element is from a
739
+ * distribution with mean \p 0.0 and standard deviation \p 1.0
740
+ */
741
+ QUALIFIERS double2 curand_normal2_double(curandStateMRG32k3a_t *state)
742
+ {
743
+ return curand_box_muller_mrg_double(state);
744
+ }
745
+
746
+ /**
747
+ * \brief Return a normally distributed double from an MTGP32 generator.
748
+ *
749
+ * Return a single normally distributed double with mean \p 0.0 and
750
+ * standard deviation \p 1.0 from the MTGP32 generator in \p state,
751
+ * increment position of generator.
752
+ *
753
+ * The implementation uses the inverse cumulative distribution function
754
+ * to generate normally distributed results.
755
+ *
756
+ * \param state - Pointer to state to update
757
+ *
758
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
759
+ */
760
+ QUALIFIERS double curand_normal_double(curandStateMtgp32_t *state)
761
+ {
762
+ return _curand_normal_icdf_double(curand(state));
763
+ }
764
+
765
+ /**
766
+ * \brief Return a normally distributed double from a Sobol32 generator.
767
+ *
768
+ * Return a single normally distributed double with mean \p 0.0 and
769
+ * standard deviation \p 1.0 from the Sobol32 generator in \p state,
770
+ * increment position of generator by one.
771
+ *
772
+ * The implementation uses the inverse cumulative distribution function
773
+ * to generate normally distributed results.
774
+ *
775
+ * \param state - Pointer to state to update
776
+ *
777
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
778
+ */
779
+ QUALIFIERS double curand_normal_double(curandStateSobol32_t *state)
780
+ {
781
+ return _curand_normal_icdf_double(curand(state));
782
+ }
783
+
784
+ /**
785
+ * \brief Return a normally distributed double from a scrambled Sobol32 generator.
786
+ *
787
+ * Return a single normally distributed double with mean \p 0.0 and
788
+ * standard deviation \p 1.0 from the scrambled Sobol32 generator in \p state,
789
+ * increment position of generator by one.
790
+ *
791
+ * The implementation uses the inverse cumulative distribution function
792
+ * to generate normally distributed results.
793
+ *
794
+ * \param state - Pointer to state to update
795
+ *
796
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
797
+ */
798
+ QUALIFIERS double curand_normal_double(curandStateScrambledSobol32_t *state)
799
+ {
800
+ return _curand_normal_icdf_double(curand(state));
801
+ }
802
+
803
+ /**
804
+ * \brief Return a normally distributed double from a Sobol64 generator.
805
+ *
806
+ * Return a single normally distributed double with mean \p 0.0 and
807
+ * standard deviation \p 1.0 from the Sobol64 generator in \p state,
808
+ * increment position of generator by one.
809
+ *
810
+ * The implementation uses the inverse cumulative distribution function
811
+ * to generate normally distributed results.
812
+ *
813
+ * \param state - Pointer to state to update
814
+ *
815
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
816
+ */
817
+ QUALIFIERS double curand_normal_double(curandStateSobol64_t *state)
818
+ {
819
+ return _curand_normal_icdf_double(curand(state));
820
+ }
821
+
822
+ /**
823
+ * \brief Return a normally distributed double from a scrambled Sobol64 generator.
824
+ *
825
+ * Return a single normally distributed double with mean \p 0.0 and
826
+ * standard deviation \p 1.0 from the scrambled Sobol64 generator in \p state,
827
+ * increment position of generator by one.
828
+ *
829
+ * The implementation uses the inverse cumulative distribution function
830
+ * to generate normally distributed results.
831
+ *
832
+ * \param state - Pointer to state to update
833
+ *
834
+ * \return Normally distributed double with mean \p 0.0 and standard deviation \p 1.0
835
+ */
836
+ QUALIFIERS double curand_normal_double(curandStateScrambledSobol64_t *state)
837
+ {
838
+ return _curand_normal_icdf_double(curand(state));
839
+ }
840
+ #endif // !defined(CURAND_NORMAL_H_)
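Editorial sketch (not part of the header): the Sobol32 overloads above go through the inverse-CDF path rather than Box-Muller. A minimal quasi-random example is sketched below, assuming the Sobol32 device initializer from curand_kernel.h and that one set of 32-bit direction vectors has already been copied to the device (e.g. after the host-side curandGetDirectionVectors32 call); all names and the launch shape are illustrative.

#include <curand_kernel.h>

// Sketch: thread i takes the i-th point of a one-dimensional Sobol32 sequence
// and maps it to N(0,1) through curand_normal(), i.e. the ICDF route above.
__global__ void sobol_normals_sketch(curandDirectionVectors32_t *dir,
                                     float *out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curandStateSobol32_t state;
    curand_init(dir[0], tid, &state);   // same dimension, per-thread offset
    out[tid] = curand_normal(&state);
}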
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h ADDED
@@ -0,0 +1,134 @@
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+ #ifndef CURAND_NORMAL_STATIC_H
49
+ #define CURAND_NORMAL_STATIC_H
50
+
51
+ #define QUALIFIERS_STATIC __host__ __device__ __forceinline__
52
+
53
+ #include <nv/target>
54
+ #if defined(HOST_HAVE_ERFCINVF)
55
+ #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) _NV_BLOCK_EXPAND(t)
56
+ #else
57
+ #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, t, f)
58
+ #endif
59
+
60
+ QUALIFIERS_STATIC float _curand_normal_icdf(unsigned int x)
61
+ {
62
+ IF_DEVICE_OR_HAVE_ERFCINVF(
63
+ float s = CURAND_SQRT2;
64
+ // Mirror to avoid loss of precision
65
+ if(x > 0x80000000UL) {
66
+ x = 0xffffffffUL - x;
67
+ s = -s;
68
+ }
69
+ float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
70
+ // p is in (0, 0.5], 2p is in (0, 1]
71
+ return s * erfcinvf(2.0f * p);
72
+ ,
73
+ x++; //suppress warnings
74
+ return 0.0f;
75
+ )
76
+ }
77
+
78
+ QUALIFIERS_STATIC float _curand_normal_icdf(unsigned long long x)
79
+ {
80
+ IF_DEVICE_OR_HAVE_ERFCINVF(
81
+ unsigned int t = (unsigned int)(x >> 32);
82
+ float s = CURAND_SQRT2;
83
+ // Mirror to avoid loss of precision
84
+ if(t > 0x80000000UL) {
85
+ t = 0xffffffffUL - t;
86
+ s = -s;
87
+ }
88
+ float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
89
+ // p is in (0 - 0.5] 2p is in (0 - 1]
90
+ return s * erfcinvf(2.0f * p);
91
+ ,
92
+ x++;
93
+ return 0.0f;
94
+ )
95
+ }
96
+
97
+ QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned int x)
98
+ {
99
+ IF_DEVICE_OR_HAVE_ERFCINVF(
100
+ double s = CURAND_SQRT2_DOUBLE;
101
+ // Mirror to avoid loss of precision
102
+ if(x > 0x80000000UL) {
103
+ x = 0xffffffffUL - x;
104
+ s = -s;
105
+ }
106
+ double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
107
+ // p is in (0, 0.5], 2p is in (0, 1]
108
+ return s * erfcinv(2.0 * p);
109
+ ,
110
+ x++;
111
+ return 0.0;
112
+ )
113
+ }
114
+
115
+ QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned long long x)
116
+ {
117
+ IF_DEVICE_OR_HAVE_ERFCINVF(
118
+ double s = CURAND_SQRT2_DOUBLE;
119
+ x >>= 11;
120
+ // Mirror to avoid loss of precision
121
+ if(x > 0x10000000000000UL) {
122
+ x = 0x1fffffffffffffUL - x;
123
+ s = -s;
124
+ }
125
+ double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
126
+ // p is in (0, 0.5], 2p is in (0, 1]
127
+ return s * erfcinv(2.0 * p);
128
+ ,
129
+ x++;
130
+ return 0.0;
131
+ )
132
+ }
133
+ #undef QUALIFIERS_STATIC
134
+ #endif
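Editorial sketch (not part of the header): the helpers above all follow the same recipe -- map the integer to p = x * 2^-32 + 2^-33, mirror values above 0x80000000 so p stays at or below roughly one half (flipping the sign applied to sqrt(2)), then finish with erfcinv(2p) on the device. The small host program below only exercises the integer-to-p mapping and the mirroring, which is plain C++; the constant is restated locally so the snippet stands alone.

#include <cstdio>

int main()
{
    const double inv32 = 1.0 / 4294967296.0;                     // 2^-32, as CURAND_2POW32_INV
    const unsigned int samples[] = {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
    for (unsigned int x : samples) {
        double s = 1.0;                                          // sign applied to sqrt(2)
        unsigned int t = x;
        if (t > 0x80000000u) { t = 0xffffffffu - t; s = -1.0; }  // mirror, as in the header
        double p = t * inv32 + inv32 / 2.0;                      // never 0, about 0.5 at most
        std::printf("x=%10u  sign=%+.0f  p=%.10f  2p=%.10f\n", x, s, p, 2.0 * p);
    }
    return 0;
}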
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_philox4x32_x.h ADDED
@@ -0,0 +1,195 @@
1
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+ /*
49
+ Copyright 2010-2011, D. E. Shaw Research.
50
+ All rights reserved.
51
+
52
+ Redistribution and use in source and binary forms, with or without
53
+ modification, are permitted provided that the following conditions are
54
+ met:
55
+
56
+ * Redistributions of source code must retain the above copyright
57
+ notice, this list of conditions, and the following disclaimer.
58
+
59
+ * Redistributions in binary form must reproduce the above copyright
60
+ notice, this list of conditions, and the following disclaimer in the
61
+ documentation and/or other materials provided with the distribution.
62
+
63
+ * Neither the name of D. E. Shaw Research nor the names of its
64
+ contributors may be used to endorse or promote products derived from
65
+ this software without specific prior written permission.
66
+
67
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
68
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
69
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
70
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
71
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
72
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
73
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
74
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
75
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
76
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
77
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78
+ */
79
+
80
+ #ifndef CURAND_PHILOX4X32_X__H_
81
+ #define CURAND_PHILOX4X32_X__H_
82
+ #include <nv/target>
83
+
84
+ #if !defined(QUALIFIERS)
85
+ #define QUALIFIERS static __forceinline__ __device__
86
+ #endif
87
+
88
+ #define PHILOX_W32_0 (0x9E3779B9)
89
+ #define PHILOX_W32_1 (0xBB67AE85)
90
+ #define PHILOX_M4x32_0 (0xD2511F53)
91
+ #define PHILOX_M4x32_1 (0xCD9E8D57)
92
+
93
+ struct curandStatePhilox4_32_10 {
94
+ uint4 ctr;
95
+ uint4 output;
96
+ uint2 key;
97
+ unsigned int STATE;
98
+ int boxmuller_flag;
99
+ int boxmuller_flag_double;
100
+ float boxmuller_extra;
101
+ double boxmuller_extra_double;
102
+ };
103
+
104
+ typedef struct curandStatePhilox4_32_10 curandStatePhilox4_32_10_t;
105
+
106
+
107
+ QUALIFIERS void Philox_State_Incr(curandStatePhilox4_32_10_t* s, unsigned long long n)
108
+ {
109
+ unsigned int nlo = (unsigned int)(n);
110
+ unsigned int nhi = (unsigned int)(n>>32);
111
+
112
+ s->ctr.x += nlo;
113
+ if( s->ctr.x < nlo )
114
+ nhi++;
115
+
116
+ s->ctr.y += nhi;
117
+ if(nhi <= s->ctr.y)
118
+ return;
119
+ if(++s->ctr.z) return;
120
+ ++s->ctr.w;
121
+ }
122
+
123
+ QUALIFIERS void Philox_State_Incr_hi(curandStatePhilox4_32_10_t* s, unsigned long long n)
124
+ {
125
+ unsigned int nlo = (unsigned int)(n);
126
+ unsigned int nhi = (unsigned int)(n>>32);
127
+
128
+ s->ctr.z += nlo;
129
+ if( s->ctr.z < nlo )
130
+ nhi++;
131
+
132
+ s->ctr.w += nhi;
133
+ }
134
+
135
+
136
+
137
+ QUALIFIERS void Philox_State_Incr(curandStatePhilox4_32_10_t* s)
138
+ {
139
+ if(++s->ctr.x) return;
140
+ if(++s->ctr.y) return;
141
+ if(++s->ctr.z) return;
142
+ ++s->ctr.w;
143
+ }
144
+
145
+
146
+ QUALIFIERS unsigned int mulhilo32(unsigned int a, unsigned int b, unsigned int* hip)
147
+ {
148
+ NV_IF_ELSE_TARGET(NV_IS_HOST,
149
+ // host code
150
+ unsigned long long product = ((unsigned long long)a) * ((unsigned long long)b);
151
+ *hip = product >> 32;
152
+ return (unsigned int)product;
153
+ ,
154
+ // device code
155
+ *hip = __umulhi(a,b);
156
+ return a*b;
157
+ )
158
+ }
159
+
160
+ QUALIFIERS uint4 _philox4x32round(uint4 ctr, uint2 key)
161
+ {
162
+ unsigned int hi0;
163
+ unsigned int hi1;
164
+ unsigned int lo0 = mulhilo32(PHILOX_M4x32_0, ctr.x, &hi0);
165
+ unsigned int lo1 = mulhilo32(PHILOX_M4x32_1, ctr.z, &hi1);
166
+
167
+ uint4 ret = {hi1^ctr.y^key.x, lo1, hi0^ctr.w^key.y, lo0};
168
+ return ret;
169
+ }
170
+
171
+ QUALIFIERS uint4 curand_Philox4x32_10( uint4 c, uint2 k)
172
+ {
173
+ c = _philox4x32round(c, k); // 1
174
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
175
+ c = _philox4x32round(c, k); // 2
176
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
177
+ c = _philox4x32round(c, k); // 3
178
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
179
+ c = _philox4x32round(c, k); // 4
180
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
181
+ c = _philox4x32round(c, k); // 5
182
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
183
+ c = _philox4x32round(c, k); // 6
184
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
185
+ c = _philox4x32round(c, k); // 7
186
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
187
+ c = _philox4x32round(c, k); // 8
188
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
189
+ c = _philox4x32round(c, k); // 9
190
+ k.x += PHILOX_W32_0; k.y += PHILOX_W32_1;
191
+ return _philox4x32round(c, k); // 10
192
+ }
193
+
194
+
195
+ #endif
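Editorial sketch (not part of the header): curand_Philox4x32_10 above is a pure function of a 128-bit counter and a 64-bit key, which is what makes the generator counter-based -- the same (counter, key) pair always reproduces the same four 32-bit words, with no state to carry around. A direct, stateless use of it is sketched below; the kernel name, key derivation, and buffer layout are assumptions.

#include <curand_kernel.h>

// Sketch: each thread builds its own counter from its global index and runs
// the 10-round Philox bijection once, producing four 32-bit outputs.
__global__ void philox_raw_sketch(unsigned int *out, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    uint4 ctr = make_uint4((unsigned int)tid, 0u, 0u, 0u);
    uint2 key = make_uint2((unsigned int)seed, (unsigned int)(seed >> 32));
    uint4 r = curand_Philox4x32_10(ctr, key);       // deterministic in (ctr, key)
    out[4 * tid + 0] = r.x;
    out[4 * tid + 1] = r.y;
    out[4 * tid + 2] = r.z;
    out[4 * tid + 3] = r.w;
}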
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h ADDED
@@ -0,0 +1,763 @@
1
+
2
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_POISSON_H_)
52
+ #define CURAND_POISSON_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include <nv/target>
65
+
66
+ #include "curand_mrg32k3a.h"
67
+ #include "curand_mtgp32_kernel.h"
68
+ #include "curand_philox4x32_x.h"
69
+
70
+ #define CR_CUDART_PI 3.1415926535897931e+0
71
+ #define CR_CUDART_TWO_TO_52 4503599627370496.0
72
+
73
+
74
+ QUALIFIERS float __cr_rsqrt(float a)
75
+ {
76
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
77
+ asm ("rsqrt.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
78
+ ,
79
+ a = 1.0f / sqrtf (a);
80
+ )
81
+ return a;
82
+ }
83
+
84
+ QUALIFIERS float __cr_exp (float a)
85
+ {
86
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
87
+ a = a * 1.4426950408889634074;
88
+ asm ("ex2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
89
+ ,
90
+ a = expf (a);
91
+ )
92
+ return a;
93
+ }
94
+
95
+ QUALIFIERS float __cr_log (float a)
96
+ {
97
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
98
+ asm ("lg2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
99
+ a = a * 0.69314718055994530942;
100
+ ,
101
+ a = logf (a);
102
+ )
103
+ return a;
104
+ }
105
+
106
+ QUALIFIERS float __cr_rcp (float a)
107
+ {
108
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
109
+ asm ("rcp.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a));
110
+ ,
111
+ a = 1.0f / a;
112
+ )
113
+ return a;
114
+ }
115
+
116
+ /* Computes regularized gamma function: gammainc(a,x)/gamma(a) */
117
+ QUALIFIERS float __cr_pgammainc (float a, float x)
118
+ {
119
+ float t, alpha, beta;
120
+
121
+ /* First level parametrization constants */
122
+ float ma1 = 1.43248035075540910f,
123
+ ma2 = 0.12400979329415655f,
124
+ ma3 = 0.00025361074907033f,
125
+ mb1 = 0.21096734870196546f,
126
+ mb2 = 1.97381164089999420f,
127
+ mb3 = 0.94201734077887530f;
128
+
129
+ /* Second level parametrization constants (depends only on a) */
130
+
131
+ alpha = __cr_rsqrt (a - ma2);
132
+ alpha = ma1 * alpha + ma3;
133
+ beta = __cr_rsqrt (a - mb2);
134
+ beta = mb1 * beta + mb3;
135
+
136
+ /* Final approximation (depends on a and x) */
137
+
138
+ t = a - x;
139
+ t = alpha * t - beta;
140
+ t = 1.0f + __cr_exp (t);
141
+ t = t * t;
142
+ t = __cr_rcp (t);
143
+
144
+ /* Negative a,x or a,x=NAN requires special handling */
145
+ //t = !(x > 0 && a >= 0) ? 0.0 : t;
146
+
147
+ return t;
148
+ }
149
+
150
+ /* Computes inverse of pgammainc */
151
+ QUALIFIERS float __cr_pgammaincinv (float a, float y)
152
+ {
153
+ float t, alpha, beta;
154
+
155
+ /* First level parametrization constants */
156
+
157
+ float ma1 = 1.43248035075540910f,
158
+ ma2 = 0.12400979329415655f,
159
+ ma3 = 0.00025361074907033f,
160
+ mb1 = 0.21096734870196546f,
161
+ mb2 = 1.97381164089999420f,
162
+ mb3 = 0.94201734077887530f;
163
+
164
+ /* Second level parametrization constants (depends only on a) */
165
+
166
+ alpha = __cr_rsqrt (a - ma2);
167
+ alpha = ma1 * alpha + ma3;
168
+ beta = __cr_rsqrt (a - mb2);
169
+ beta = mb1 * beta + mb3;
170
+
171
+ /* Final approximation (depends on a and y) */
172
+
173
+ t = __cr_rsqrt (y) - 1.0f;
174
+ t = __cr_log (t);
175
+ t = beta + t;
176
+ t = - t * __cr_rcp (alpha) + a;
177
+ /* Negative a,x or a,x=NAN requires special handling */
178
+ //t = !(y > 0 && a >= 0) ? 0.0 : t;
179
+ return t;
180
+ }
181
+
182
+ #if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables)
183
+ inline __constant__ double __cr_lgamma_table [] = {
184
+ #else
185
+ static __constant__ double __cr_lgamma_table [] = {
186
+ #endif
187
+ 0.000000000000000000e-1,
188
+ 0.000000000000000000e-1,
189
+ 6.931471805599453094e-1,
190
+ 1.791759469228055001e0,
191
+ 3.178053830347945620e0,
192
+ 4.787491742782045994e0,
193
+ 6.579251212010100995e0,
194
+ 8.525161361065414300e0,
195
+ 1.060460290274525023e1
196
+ };
197
+
198
+
199
+ QUALIFIERS double __cr_lgamma_integer(int a)
200
+ {
201
+ double s;
202
+ double t;
203
+ double fa = fabs((float)a);
204
+ double sum;
205
+
206
+ if (a > 8) {
207
+ /* Stirling approximation; coefficients from Hart et al, "Computer
208
+ * Approximations", Wiley 1968. Approximation 5404.
209
+ */
210
+ s = 1.0 / fa;
211
+ t = s * s;
212
+ sum = -0.1633436431e-2;
213
+ sum = sum * t + 0.83645878922e-3;
214
+ sum = sum * t - 0.5951896861197e-3;
215
+ sum = sum * t + 0.793650576493454e-3;
216
+ sum = sum * t - 0.277777777735865004e-2;
217
+ sum = sum * t + 0.833333333333331018375e-1;
218
+ sum = sum * s + 0.918938533204672;
219
+ s = 0.5 * log (fa);
220
+ t = fa - 0.5;
221
+ s = s * t;
222
+ t = s - fa;
223
+ s = s + sum;
224
+ t = t + s;
225
+ return t;
226
+ } else {
227
+ NV_IF_ELSE_TARGET(NV_IS_DEVICE,
228
+ return __cr_lgamma_table [(int) fa-1];
229
+ ,
230
+ switch(a) {
231
+ case 1: return 0.000000000000000000e-1;
232
+ case 2: return 0.000000000000000000e-1;
233
+ case 3: return 6.931471805599453094e-1;
234
+ case 4: return 1.791759469228055001e0;
235
+ case 5: return 3.178053830347945620e0;
236
+ case 6: return 4.787491742782045994e0;
237
+ case 7: return 6.579251212010100995e0;
238
+ case 8: return 8.525161361065414300e0;
239
+ default: return 1.060460290274525023e1;
240
+ }
241
+ )
242
+ }
243
+ }
244
+
245
+ #define KNUTH_FLOAT_CONST 60.0
246
+ template <typename T>
247
+ // Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
248
+ QUALIFIERS unsigned int curand_poisson_knuth(T *state, float lambda)
249
+ {
250
+ unsigned int k = 0;
251
+ float p = expf(lambda);
252
+ do{
253
+ k++;
254
+ p *= curand_uniform(state);
255
+ }while (p > 1.0);
256
+ return k-1;
257
+ }
258
+
259
+ template <typename T>
260
+ // Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2
261
+ QUALIFIERS uint4 curand_poisson_knuth4(T *state, float lambda)
262
+ {
263
+ uint4 k = {0,0,0,0};
264
+ float exp_lambda = expf(lambda);
265
+ float4 p={ exp_lambda,exp_lambda,exp_lambda,exp_lambda };
266
+ do{
267
+ k.x++;
268
+ p.x *= curand_uniform(state);
269
+ }while (p.x > 1.0);
270
+ do{
271
+ k.y++;
272
+ p.y *= curand_uniform(state);
273
+ }while (p.y > 1.0);
274
+ do{
275
+ k.z++;
276
+ p.z *= curand_uniform(state);
277
+ }while (p.z > 1.0);
278
+ do{
279
+ k.w++;
280
+ p.w *= curand_uniform(state);
281
+ }while (p.w > 1.0);
282
+
283
+ k.x--;
284
+ k.y--;
285
+ k.z--;
286
+ k.w--;
287
+ return k;
288
+ }
289
+
290
+ template <typename T>
291
+ // Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
292
+ QUALIFIERS unsigned int _curand_M2_double(T x, curandDistributionM2Shift_t distributionM2)
293
+ {
294
+ double u = _curand_uniform_double(x);
295
+ int j = (int) floor(distributionM2->length*u);
296
+
297
+ double histogramVj;
298
+ unsigned int histogramKj;
299
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
300
+ histogramVj = __ldg( &(distributionM2->histogram->V[j]));
301
+ histogramKj = __ldg( &(distributionM2->histogram->K[j]));
302
+ ,
303
+ histogramVj = distributionM2->histogram->V[j];
304
+ histogramKj = distributionM2->histogram->K[j];
305
+ )
306
+ //if (u < distributionM2->histogram->V[j]) return distributionM2->shift + j;
307
+ //return distributionM2->shift + distributionM2->histogram->K[j];
308
+ if (u < histogramVj) return distributionM2->shift + j;
309
+ return distributionM2->shift + histogramKj;
310
+ }
311
+
312
+ template <typename T>
313
+ // Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram.
314
+ QUALIFIERS uint4 _curand_M2_double4(T x, curandDistributionM2Shift_t distributionM2)
315
+ {
316
+ double4 u;
317
+ uint4 result = {0,0,0,0};
318
+ int4 flag = {1,1,1,1};
319
+
320
+ u.x = _curand_uniform_double(x.x);
321
+ u.y = _curand_uniform_double(x.y);
322
+ u.z = _curand_uniform_double(x.z);
323
+ u.w = _curand_uniform_double(x.w);
324
+
325
+ int4 j;
326
+ j.x = (int) floor(distributionM2->length*u.x);
327
+ j.y = (int) floor(distributionM2->length*u.y);
328
+ j.z = (int) floor(distributionM2->length*u.z);
329
+ j.w = (int) floor(distributionM2->length*u.w);
330
+ // int result;
331
+
332
+ double histogramVjx;
333
+ double histogramVjy;
334
+ double histogramVjz;
335
+ double histogramVjw;
336
+ unsigned int histogramKjx;
337
+ unsigned int histogramKjy;
338
+ unsigned int histogramKjz;
339
+ unsigned int histogramKjw;
340
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
341
+ histogramVjx = __ldg( &(distributionM2->histogram->V[j.x]));
342
+ histogramVjy = __ldg( &(distributionM2->histogram->V[j.y]));
343
+ histogramVjz = __ldg( &(distributionM2->histogram->V[j.z]));
344
+ histogramVjw = __ldg( &(distributionM2->histogram->V[j.w]));
345
+
346
+ histogramKjx = __ldg( &(distributionM2->histogram->K[j.x]));
347
+ histogramKjy = __ldg( &(distributionM2->histogram->K[j.y]));
348
+ histogramKjz = __ldg( &(distributionM2->histogram->K[j.z]));
349
+ histogramKjw = __ldg( &(distributionM2->histogram->K[j.w]));
350
+ ,
351
+ histogramVjx = distributionM2->histogram->V[j.x];
352
+ histogramVjy = distributionM2->histogram->V[j.y];
353
+ histogramVjz = distributionM2->histogram->V[j.z];
354
+ histogramVjw = distributionM2->histogram->V[j.w];
355
+
356
+ histogramKjx = distributionM2->histogram->K[j.x];
357
+ histogramKjy = distributionM2->histogram->K[j.y];
358
+ histogramKjz = distributionM2->histogram->K[j.z];
359
+ histogramKjw = distributionM2->histogram->K[j.w];
360
+ )
361
+
362
+ if (u.x < histogramVjx){ result.x = distributionM2->shift + j.x; flag.x = 0; }
363
+ if (u.y < histogramVjy){ result.y = distributionM2->shift + j.y; flag.y = 0; }
364
+ if (u.z < histogramVjz){ result.z = distributionM2->shift + j.z; flag.z = 0; }
365
+ if (u.w < histogramVjw){ result.w = distributionM2->shift + j.w; flag.w = 0; }
366
+ //return distributionM2->shift + distributionM2->histogram->K[j];
367
+
368
+ if(flag.x) result.x = distributionM2->shift + histogramKjx;
369
+ if(flag.y) result.y = distributionM2->shift + histogramKjy;
370
+ if(flag.z) result.z = distributionM2->shift + histogramKjz;
371
+ if(flag.w) result.w = distributionM2->shift + histogramKjw;
372
+
373
+ return result;
374
+ }
375
+
376
+ template <typename STATE>
377
+ QUALIFIERS unsigned int curand_M2_double(STATE *state, curandDistributionM2Shift_t distributionM2)
378
+ {
379
+ return _curand_M2_double(curand(state), distributionM2);
380
+ }
381
+
382
+ template <typename STATE>
383
+ QUALIFIERS uint4 curand_M2_double4(STATE *state, curandDistributionM2Shift_t distributionM2)
384
+ {
385
+ return _curand_M2_double4(curand4(state), distributionM2);
386
+ }
387
+
388
+
389
+ template <typename T>
390
+ QUALIFIERS unsigned int _curand_binary_search_double(T x, curandDistributionShift_t distribution)
391
+ {
392
+ double u = _curand_uniform_double(x);
393
+ int min = 0;
394
+ int max = distribution->length-1;
395
+ do{
396
+ int mid = (max + min)/2;
397
+ double probability_mid;
398
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35,
399
+ probability_mid = __ldg( &(distribution->probability[mid]));
400
+ ,
401
+ probability_mid = distribution->probability[mid];
402
+ )
403
+ if (u <= probability_mid){
404
+ max = mid;
405
+ }else{
406
+ min = mid+1;
407
+ }
408
+ }while (min < max);
409
+ return distribution->shift + min;
410
+ }
411
+
412
+ template <typename STATE>
413
+ QUALIFIERS unsigned int curand_binary_search_double(STATE *state, curandDistributionShift_t distribution)
414
+ {
415
+ return _curand_binary_search_double(curand(state), distribution);
416
+ }
417
+
418
+ // Generates uniformly distributed double values in range (0.0; 1.0) from uniformly distributed
419
+ // unsigned int. We can't use standard _curand_uniform_double since it can generate 1.0.
420
+ // This is required only for _curand_poisson_ITR_double.
421
+ QUALIFIERS double _curand_uniform_double_excluding_one(unsigned int x)
422
+ {
423
+ return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0);
424
+ }
425
+
426
+ // Overload for unsigned long long.
427
+ // This is required only for _curand_poisson_ITR_double.
428
+ QUALIFIERS double _curand_uniform_double_excluding_one(unsigned long long x)
429
+ {
430
+ return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/4.0);
431
+ }
432
+
433
+ #define MAGIC_DOUBLE_CONST 500.0
434
+ template <typename T>
435
+ //George S. Fishman Discrete-event simulation: modeling, programming, and analysis
436
+ QUALIFIERS unsigned int _curand_poisson_ITR_double(T x, double lambda)
437
+ {
438
+ double L,p = 1.0;
439
+ double q = 1.0;
440
+ unsigned int k = 0;
441
+ int pow=0;
442
+ // This algorithm requires u to be in (0;1) range, however, _curand_uniform_double
443
+ // returns a number in range (0;1]. If u is 1.0 the inner loop never ends. The
444
+ // following operation transforms the range from (0;1] to (0;1).
445
+ double u = _curand_uniform_double_excluding_one(x);
446
+ do{
447
+ if (lambda > (double)(pow+MAGIC_DOUBLE_CONST)){
448
+ L = exp(-MAGIC_DOUBLE_CONST);
449
+ }else{
450
+ L = exp((double)(pow - lambda));
451
+ }
452
+ p *= L;
453
+ q *= L;
454
+ pow += (int) MAGIC_DOUBLE_CONST;
455
+ while (u > q){
456
+ k++;
457
+ p *= ((double)lambda / (double) k);
458
+ q += p;
459
+ }
460
+ }while((double)pow < lambda);
461
+ return k;
462
+ }
463
+
464
+ template <typename T>
465
+ /* Rejection Method for Poisson distribution based on gammainc approximation */
466
+ QUALIFIERS unsigned int curand_poisson_gammainc(T state, float lambda){
467
+ float y, x, t, z,v;
468
+ float logl = __cr_log (lambda);
469
+ while (true) {
470
+ y = curand_uniform (state);
471
+ x = __cr_pgammaincinv (lambda, y);
472
+ x = floorf (x);
473
+ z = curand_uniform (state);
474
+ v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
475
+ z = z*v;
476
+ t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
477
+ if ((z < t) && (v>=1e-20))
478
+ break;
479
+ }
480
+ return (unsigned int)x;
481
+ }
482
+
483
+ template <typename T>
484
+ /* Rejection Method for Poisson distribution based on gammainc approximation */
485
+ QUALIFIERS uint4 curand_poisson_gammainc4(T state, float lambda){
486
+ uint4 result;
487
+ float y, x, t, z,v;
488
+ float logl = __cr_log (lambda);
489
+ while (true) {
490
+ y = curand_uniform(state);
491
+ x = __cr_pgammaincinv (lambda, y);
492
+ x = floorf (x);
493
+ z = curand_uniform (state);
494
+ v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
495
+ z = z*v;
496
+ t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
497
+ if ((z < t) && (v>=1e-20))
498
+ break;
499
+ }
500
+ result.x = (unsigned int)x;
501
+
502
+ while (true) {
503
+ y = curand_uniform(state);
504
+ x = __cr_pgammaincinv (lambda, y);
505
+ x = floorf (x);
506
+ z = curand_uniform (state);
507
+ v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
508
+ z = z*v;
509
+ t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
510
+ if ((z < t) && (v>=1e-20))
511
+ break;
512
+ }
513
+ result.y = (unsigned int)x;
514
+
515
+ while (true) {
516
+ y = curand_uniform(state);
517
+ x = __cr_pgammaincinv (lambda, y);
518
+ x = floorf (x);
519
+ z = curand_uniform (state);
520
+ v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
521
+ z = z*v;
522
+ t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
523
+ if ((z < t) && (v>=1e-20))
524
+ break;
525
+ }
526
+ result.z = (unsigned int)x;
527
+
528
+ while (true) {
529
+ y = curand_uniform(state);
530
+ x = __cr_pgammaincinv (lambda, y);
531
+ x = floorf (x);
532
+ z = curand_uniform (state);
533
+ v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f;
534
+ z = z*v;
535
+ t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x)));
536
+ if ((z < t) && (v>=1e-20))
537
+ break;
538
+ }
539
+ result.w = (unsigned int)x;
540
+
541
+ return result;
542
+ }
543
+ // Note below that the round to nearest integer, where needed, is done in line with code that
544
+ // assumes the range of values is < 2**32
545
+
546
+ template <typename T>
547
+ QUALIFIERS unsigned int _curand_poisson(T x, double lambda)
548
+ {
549
+ if (lambda < 1000)
550
+ return _curand_poisson_ITR_double(x, lambda);
551
+ return (unsigned int)((sqrt(lambda) * _curand_normal_icdf_double(x)) + lambda + 0.5); //Round to nearest
552
+ }
553
+
554
+ template <typename T>
555
+ QUALIFIERS unsigned int _curand_poisson_from_normal(T x, double lambda)
556
+ {
557
+ return (unsigned int)((sqrt(lambda) * _curand_normal_icdf(x)) + lambda + 0.5); //Round to nearest
558
+ }
559
+
560
+ template <typename STATE>
561
+ QUALIFIERS unsigned int curand_poisson_from_normal(STATE state, double lambda)
562
+ {
563
+ return (unsigned int)((sqrt(lambda) * curand_normal(state)) + lambda + 0.5); //Round to nearest
564
+ }
565
+
566
+ template <typename STATE>
567
+ QUALIFIERS uint4 curand_poisson_from_normal4(STATE state, double lambda)
568
+ {
569
+ uint4 result;
570
+ float4 _res;
571
+
572
+ _res = curand_normal4(state);
573
+
574
+ result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
575
+ result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
576
+ result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
577
+ result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
578
+ return result; //Round to nearest
579
+ }
580
+
581
+ /**
582
+ * \brief Return a Poisson-distributed unsigned int from a XORWOW generator.
583
+ *
584
+ * Return a single unsigned int from a Poisson
585
+ * distribution with lambda \p lambda from the XORWOW generator in \p state,
586
+ * increment the position of the generator by a variable amount, depending
587
+ * on the algorithm used.
588
+ *
589
+ * \param state - Pointer to state to update
590
+ * \param lambda - Lambda of the Poisson distribution
591
+ *
592
+ * \return Poisson-distributed unsigned int with lambda \p lambda
593
+ */
594
+ QUALIFIERS unsigned int curand_poisson(curandStateXORWOW_t *state, double lambda)
595
+ {
596
+ if (lambda < 64)
597
+ return curand_poisson_knuth(state, (float)lambda);
598
+ if (lambda > 4000)
599
+ return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
600
+ return curand_poisson_gammainc(state, (float)lambda);
601
+ }
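As a usage sketch for the overload above (not part of the header), the kernel below draws one Poisson sample per thread with the XORWOW device API; the kernel name, launch geometry, seed, and output buffer are illustrative only.

#include <curand_kernel.h>

__global__ void poisson_xorwow_kernel(unsigned int *out, unsigned long long seed,
                                      double lambda, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    curandStateXORWOW_t state;
    // One subsequence per thread keeps the per-thread streams independent.
    curand_init(seed, (unsigned long long)i, 0ULL, &state);
    out[i] = curand_poisson(&state, lambda);
}

In a real generator kernel the state is typically initialized once, stored in global memory, and reloaded on each launch, since curand_init is comparatively expensive.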
602
+
603
+ /**
604
+ * \brief Return a Poisson-distributed unsigned int from a Philox4_32_10 generator.
605
+ *
606
+ * Return a single unsigned int from a Poisson
607
+ * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
608
+ * increment the position of the generator by a variable amount, depending
609
+ * on the algorithm used.
610
+ *
611
+ * \param state - Pointer to state to update
612
+ * \param lambda - Lambda of the Poisson distribution
613
+ *
614
+ * \return Poisson-distributed unsigned int with lambda \p lambda
615
+ */
616
+ QUALIFIERS unsigned int curand_poisson(curandStatePhilox4_32_10_t *state, double lambda)
617
+ {
618
+ if (lambda < 64)
619
+ return curand_poisson_knuth(state, (float)lambda);
620
+ if (lambda > 4000)
621
+ return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
622
+ return curand_poisson_gammainc(state, (float)lambda);
623
+ }
624
+ /**
625
+ * \brief Return four Poisson-distributed unsigned ints from a Philox4_32_10 generator.
626
+ *
627
+ * Return four unsigned ints from a Poisson
628
+ * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state,
629
+ * increment the position of the generator by a variable amount, depending
630
+ * on the algorithm used.
631
+ *
632
+ * \param state - Pointer to state to update
633
+ * \param lambda - Lambda of the Poisson distribution
634
+ *
635
+ * \return Four Poisson-distributed unsigned ints with lambda \p lambda
636
+ */
637
+ QUALIFIERS uint4 curand_poisson4(curandStatePhilox4_32_10_t *state, double lambda)
638
+ {
639
+ uint4 result;
640
+ double4 _res;
641
+ if (lambda < 64)
642
+ return curand_poisson_knuth4(state, (float)lambda);
643
+ if (lambda > 4000) {
644
+ _res = curand_normal4_double(state);
645
+ result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest
646
+ result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest
647
+ result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest
648
+ result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest
649
+ return result;
650
+ }
651
+ return curand_poisson_gammainc4(state, (float)lambda);
652
+ }
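A corresponding sketch for the four-wide overload above (again, the names are illustrative assumptions): each thread keeps one Philox state and writes four Poisson draws per call.

#include <curand_kernel.h>

__global__ void poisson4_philox_kernel(uint4 *out, unsigned long long seed,
                                       double lambda, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    curandStatePhilox4_32_10_t state;
    curand_init(seed, (unsigned long long)i, 0ULL, &state);
    out[i] = curand_poisson4(&state, lambda);   // four samples per call
}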
653
+
654
+
655
+
656
+ /**
657
+ * \brief Return a Poisson-distributed unsigned int from a MRG32k3A generator.
658
+ *
659
+ * Return a single unsigned int from a Poisson
660
+ * distribution with lambda \p lambda from the MRG32k3a generator in \p state,
661
+ * increment the position of the generator by a variable amount, depending
662
+ * on the algorithm used.
663
+ *
664
+ * \param state - Pointer to state to update
665
+ * \param lambda - Lambda of the Poisson distribution
666
+ *
667
+ * \return Poisson-distributed unsigned int with lambda \p lambda
668
+ */
669
+ QUALIFIERS unsigned int curand_poisson(curandStateMRG32k3a_t *state, double lambda)
670
+ {
671
+ if (lambda < 64)
672
+ return curand_poisson_knuth(state, (float)lambda);
673
+ if (lambda > 4000)
674
+ return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest
675
+ return curand_poisson_gammainc(state, (float)lambda);
676
+ }
677
+
678
+ /**
679
+ * \brief Return a Poisson-distributed unsigned int from a MTGP32 generator.
680
+ *
681
+ * Return a single int from a Poisson
682
+ * distribution with lambda \p lambda from the MTGP32 generator in \p state,
683
+ * increment the position of the generator by one.
684
+ *
685
+ * \param state - Pointer to state to update
686
+ * \param lambda - Lambda of the Poisson distribution
687
+ *
688
+ * \return Poisson-distributed unsigned int with lambda \p lambda
689
+ */
690
+ QUALIFIERS unsigned int curand_poisson(curandStateMtgp32_t *state, double lambda)
691
+ {
692
+ return _curand_poisson(curand(state), lambda);
693
+ }
694
+
695
+ /**
696
+ * \brief Return a Poisson-distributed unsigned int from a Sobol32 generator.
697
+ *
698
+ * Return a single unsigned int from a Poisson
699
+ * distribution with lambda \p lambda from the Sobol32 generator in \p state,
700
+ * increment the position of the generator by one.
701
+ *
702
+ * \param state - Pointer to state to update
703
+ * \param lambda - Lambda of the Poisson distribution
704
+ *
705
+ * \return Poisson-distributed unsigned int with lambda \p lambda
706
+ */
707
+
708
+ QUALIFIERS unsigned int curand_poisson(curandStateSobol32_t *state, double lambda)
709
+ {
710
+ return _curand_poisson(curand(state), lambda);
711
+ }
712
+
713
+ /**
714
+ * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol32 generator.
715
+ *
716
+ * Return a single unsigned int from a Poisson
717
+ * distribution with lambda \p lambda from the scrambled Sobol32 generator in \p state,
718
+ * increment the position of the generator by one.
719
+ *
720
+ * \param state - Pointer to state to update
721
+ * \param lambda - Lambda of the Poisson distribution
722
+ *
723
+ * \return Poisson-distributed unsigned int with lambda \p lambda
724
+ */
725
+ QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol32_t *state, double lambda)
726
+ {
727
+ return _curand_poisson(curand(state), lambda);
728
+ }
729
+
730
+ /**
731
+ * \brief Return a Poisson-distributed unsigned int from a Sobol64 generator.
732
+ *
733
+ * Return a single unsigned int from a Poisson
734
+ * distribution with lambda \p lambda from the Sobol64 generator in \p state,
735
+ * increment position of generator by one.
736
+ *
737
+ * \param state - Pointer to state to update
738
+ * \param lambda - Lambda of the Poisson distribution
739
+ *
740
+ * \return Poisson-distributed unsigned int with lambda \p lambda
741
+ */
742
+ QUALIFIERS unsigned int curand_poisson(curandStateSobol64_t *state, double lambda)
743
+ {
744
+ return _curand_poisson(curand(state), lambda);
745
+ }
746
+
747
+ /**
748
+ * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol64 generator.
749
+ *
750
+ * Return a single unsigned int from a Poisson
751
+ * distribution with lambda \p lambda from the scrambled Sobol64 generator in \p state,
752
+ * increment position of generator by one.
753
+ *
754
+ * \param state - Pointer to state to update
755
+ * \param lambda - Lambda of the Poisson distribution
756
+ *
757
+ * \return Poisson-distributed unsigned int with lambda \p lambda
758
+ */
759
+ QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol64_t *state, double lambda)
760
+ {
761
+ return _curand_poisson(curand(state), lambda);
762
+ }
763
+ #endif // !defined(CURAND_POISSON_H_)
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_precalc.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h ADDED
@@ -0,0 +1,498 @@
1
+
2
+ /* Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * The source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * The Licensed Deliverables contained herein are PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #if !defined(CURAND_UNIFORM_H_)
52
+ #define CURAND_UNIFORM_H_
53
+
54
+ /**
55
+ * \defgroup DEVICE Device API
56
+ *
57
+ * @{
58
+ */
59
+
60
+ #ifndef __CUDACC_RTC__
61
+ #include <math.h>
62
+ #endif // __CUDACC_RTC__
63
+
64
+ #include "curand_mrg32k3a.h"
65
+ #include "curand_mtgp32_kernel.h"
66
+ #include "curand_philox4x32_x.h"
67
+
68
+
69
+ QUALIFIERS float _curand_uniform(unsigned int x)
70
+ {
71
+ return x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
72
+ }
73
+
74
+ QUALIFIERS float4 _curand_uniform4(uint4 x)
75
+ {
76
+ float4 y;
77
+ y.x = x.x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
78
+ y.y = x.y * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
79
+ y.z = x.z * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
80
+ y.w = x.w * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
81
+ return y;
82
+ }
83
+
84
+ QUALIFIERS float _curand_uniform(unsigned long long x)
85
+ {
86
+ unsigned int t;
87
+ t = (unsigned int)(x >> 32);
88
+ return t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f);
89
+ }
90
+
91
+ QUALIFIERS double _curand_uniform_double(unsigned int x)
92
+ {
93
+ return x * CURAND_2POW32_INV_DOUBLE + CURAND_2POW32_INV_DOUBLE;
94
+ }
95
+
96
+ QUALIFIERS double _curand_uniform_double(unsigned long long x)
97
+ {
98
+ return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
99
+ }
100
+
101
+ QUALIFIERS double _curand_uniform_double_hq(unsigned int x, unsigned int y)
102
+ {
103
+ unsigned long long z = (unsigned long long)x ^
104
+ ((unsigned long long)y << (53 - 32));
105
+ return z * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0);
106
+ }
107
+
108
+ QUALIFIERS float curand_uniform(curandStateTest_t *state)
109
+ {
110
+ return _curand_uniform(curand(state));
111
+ }
112
+
113
+ QUALIFIERS double curand_uniform_double(curandStateTest_t *state)
114
+ {
115
+ return _curand_uniform_double(curand(state));
116
+ }
117
+
118
+ /**
119
+ * \brief Return a uniformly distributed float from an XORWOW generator.
120
+ *
121
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
122
+ * from the XORWOW generator in \p state, increment position of generator.
123
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
124
+ * point outputs are never returned.
125
+ *
126
+ * The implementation may use any number of calls to \p curand() to
127
+ * get enough random bits to create the return value. The current
128
+ * implementation uses one call.
129
+ *
130
+ * \param state - Pointer to state to update
131
+ *
132
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
133
+ */
134
+ QUALIFIERS float curand_uniform(curandStateXORWOW_t *state)
135
+ {
136
+ return _curand_uniform(curand(state));
137
+ }
138
+
139
+ /**
140
+ * \brief Return a uniformly distributed double from an XORWOW generator.
141
+ *
142
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
143
+ * from the XORWOW generator in \p state, increment position of generator.
144
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
145
+ * point outputs are never returned.
146
+ *
147
+ * The implementation may use any number of calls to \p curand() to
148
+ * get enough random bits to create the return value. The current
149
+ * implementation uses exactly two calls.
150
+ *
151
+ * \param state - Pointer to state to update
152
+ *
153
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
154
+ */
155
+ QUALIFIERS double curand_uniform_double(curandStateXORWOW_t *state)
156
+ {
157
+ unsigned int x, y;
158
+ x = curand(state);
159
+ y = curand(state);
160
+ return _curand_uniform_double_hq(x, y);
161
+ }
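As a usage sketch for the two XORWOW overloads above (illustrative names only): the single-precision draw consumes one 32-bit value per call, the double-precision draw consumes two.

#include <curand_kernel.h>

__global__ void uniform_xorwow_kernel(float *f_out, double *d_out,
                                      unsigned long long seed, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    curandStateXORWOW_t state;
    curand_init(seed, (unsigned long long)i, 0ULL, &state);
    f_out[i] = curand_uniform(&state);          // float in (0.0f, 1.0f]
    d_out[i] = curand_uniform_double(&state);   // double in (0.0, 1.0]
}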
162
+ /**
163
+ * \brief Return a uniformly distributed float from an MRG32k3a generator.
164
+ *
165
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
166
+ * from the MRG32k3a generator in \p state, increment position of generator.
167
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
168
+ * point outputs are never returned.
169
+ *
170
+ * The implementation returns up to 23 bits of mantissa, with the minimum
171
+ * return value \f$ 2^{-32} \f$
172
+ *
173
+ * \param state - Pointer to state to update
174
+ *
175
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
176
+ */
177
+ QUALIFIERS float curand_uniform(curandStateMRG32k3a_t *state)
178
+ {
179
+ return ((float)(curand_MRG32k3a(state)*MRG32K3A_NORM));
180
+ }
181
+
182
+ /**
183
+ * \brief Return a uniformly distributed double from an MRG32k3a generator.
184
+ *
185
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
186
+ * from the MRG32k3a generator in \p state, increment position of generator.
187
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
188
+ * point outputs are never returned.
189
+ *
190
+ * Note the implementation returns at most 32 random bits of mantissa as
191
+ * outlined in the seminal paper by L'Ecuyer.
192
+ *
193
+ * \param state - Pointer to state to update
194
+ *
195
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
196
+ */
197
+ QUALIFIERS double curand_uniform_double(curandStateMRG32k3a_t *state)
198
+ {
199
+ return curand_MRG32k3a(state)*MRG32K3A_NORM;
200
+ }
201
+
202
+
203
+
204
+ /**
205
+ * \brief Return a uniformly distributed tuple of 2 doubles from a Philox4_32_10 generator.
206
+ *
207
+ * Return 2 uniformly distributed doubles (double2) between \p 0.0 and \p 1.0
208
+ * from the Philox4_32_10 generator in \p state, increment position of generator by 4.
209
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
210
+ * point outputs are never returned.
211
+ *
212
+ * \param state - Pointer to state to update
213
+ *
214
+ * \return 2 uniformly distributed doubles between \p 0.0 and \p 1.0
215
+ */
216
+
217
+ QUALIFIERS double2 curand_uniform2_double(curandStatePhilox4_32_10_t *state)
218
+ {
219
+ uint4 _x;
220
+ double2 result;
221
+ _x = curand4(state);
222
+ result.x = _curand_uniform_double_hq(_x.x,_x.y);
223
+ result.y = _curand_uniform_double_hq(_x.z,_x.w);
224
+ return result;
225
+ }
226
+
227
+
228
+ // not a part of API
229
+ QUALIFIERS double4 curand_uniform4_double(curandStatePhilox4_32_10_t *state)
230
+ {
231
+ uint4 _x, _y;
232
+ double4 result;
233
+ _x = curand4(state);
234
+ _y = curand4(state);
235
+ result.x = _curand_uniform_double_hq(_x.x,_x.y);
236
+ result.y = _curand_uniform_double_hq(_x.z,_x.w);
237
+ result.z = _curand_uniform_double_hq(_y.x,_y.y);
238
+ result.w = _curand_uniform_double_hq(_y.z,_y.w);
239
+ return result;
240
+ }
241
+
242
+ /**
243
+ * \brief Return a uniformly distributed float from a Philox4_32_10 generator.
244
+ *
245
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
246
+ * from the Philox4_32_10 generator in \p state, increment position of generator.
247
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
248
+ * point outputs are never returned.
249
+ *
250
+ * \param state - Pointer to state to update
251
+ *
252
+ * \return uniformly distributed float between \p 0.0 and \p 1.0
253
+ *
254
+ */
255
+ QUALIFIERS float curand_uniform(curandStatePhilox4_32_10_t *state)
256
+ {
257
+ return _curand_uniform(curand(state));
258
+ }
259
+
260
+ /**
261
+ * \brief Return a uniformly distributed tuple of 4 floats from a Philox4_32_10 generator.
262
+ *
263
+ * Return 4 uniformly distributed floats between \p 0.0f and \p 1.0f
264
+ * from the Philox4_32_10 generator in \p state, increment position of generator by 4.
265
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
266
+ * point outputs are never returned.
267
+ *
268
+ * \param state - Pointer to state to update
269
+ *
270
+ * \return 4 uniformly distributed floats between \p 0.0f and \p 1.0f
271
+ *
272
+ */
273
+ QUALIFIERS float4 curand_uniform4(curandStatePhilox4_32_10_t *state)
274
+ {
275
+ return _curand_uniform4(curand4(state));
276
+ }
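A sketch of the vectorized Philox path above (illustrative names): one call yields four floats and advances the generator by four, which is generally preferable to four scalar calls when the consumer can use float4.

#include <curand_kernel.h>

__global__ void uniform4_philox_kernel(float4 *out, unsigned long long seed, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    curandStatePhilox4_32_10_t state;
    curand_init(seed, (unsigned long long)i, 0ULL, &state);
    out[i] = curand_uniform4(&state);   // four floats in (0.0f, 1.0f] per call
}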
277
+
278
+ /**
279
+ * \brief Return a uniformly distributed float from a MTGP32 generator.
280
+ *
281
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
282
+ * from the MTGP32 generator in \p state, increment position of generator.
283
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
284
+ * point outputs are never returned.
285
+ *
286
+ * \param state - Pointer to state to update
287
+ *
288
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
289
+ */
290
+ QUALIFIERS float curand_uniform(curandStateMtgp32_t *state)
291
+ {
292
+ return _curand_uniform(curand(state));
293
+ }
294
+ /**
295
+ * \brief Return a uniformly distributed double from a MTGP32 generator.
296
+ *
297
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
298
+ * from the MTGP32 generator in \p state, increment position of generator.
299
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
300
+ * point outputs are never returned.
301
+ *
302
+ * Note that the implementation uses only 32 random bits to generate a single double
303
+ * precision value.
304
+ *
305
+ * \param state - Pointer to state to update
306
+ *
307
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
308
+ */
309
+ QUALIFIERS double curand_uniform_double(curandStateMtgp32_t *state)
310
+ {
311
+ return _curand_uniform_double(curand(state));
312
+ }
313
+
314
+ /**
315
+ * \brief Return a uniformly distributed double from a Philox4_32_10 generator.
316
+ *
317
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
318
+ * from the Philox4_32_10 generator in \p state, increment position of generator.
319
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
320
+ * point outputs are never returned.
321
+ *
322
+ * Note that the implementation uses only 32 random bits to generate a single double
323
+ * precision value.
324
+ *
325
+ * \p curand_uniform2_double() is recommended for higher quality uniformly distributed
326
+ * double precision values.
327
+ *
328
+ * \param state - Pointer to state to update
329
+ *
330
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
331
+ */
332
+
333
+ QUALIFIERS double curand_uniform_double(curandStatePhilox4_32_10_t *state)
334
+ {
335
+ return _curand_uniform_double(curand(state));
336
+ }
337
+
338
+
339
+ /**
340
+ * \brief Return a uniformly distributed float from a Sobol32 generator.
341
+ *
342
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
343
+ * from the Sobol32 generator in \p state, increment position of generator.
344
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
345
+ * point outputs are never returned.
346
+ *
347
+ * The implementation is guaranteed to use a single call to \p curand().
348
+ *
349
+ * \param state - Pointer to state to update
350
+ *
351
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
352
+ */
353
+ QUALIFIERS float curand_uniform(curandStateSobol32_t *state)
354
+ {
355
+ return _curand_uniform(curand(state));
356
+ }
357
+
358
+ /**
359
+ * \brief Return a uniformly distributed double from a Sobol32 generator.
360
+ *
361
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
362
+ * from the Sobol32 generator in \p state, increment position of generator.
363
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
364
+ * point outputs are never returned.
365
+ *
366
+ * The implementation is guaranteed to use a single call to \p curand()
367
+ * to preserve the quasirandom properties of the sequence.
368
+ *
369
+ * Note that the implementation uses only 32 random bits to generate a single double
370
+ * precision value.
371
+ *
372
+ * \param state - Pointer to state to update
373
+ *
374
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
375
+ */
376
+ QUALIFIERS double curand_uniform_double(curandStateSobol32_t *state)
377
+ {
378
+ return _curand_uniform_double(curand(state));
379
+ }
380
+ /**
381
+ * \brief Return a uniformly distributed float from a scrambled Sobol32 generator.
382
+ *
383
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
384
+ * from the scrambled Sobol32 generator in \p state, increment position of generator.
385
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
386
+ * point outputs are never returned.
387
+ *
388
+ * The implementation is guaranteed to use a single call to \p curand().
389
+ *
390
+ * \param state - Pointer to state to update
391
+ *
392
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
393
+ */
394
+ QUALIFIERS float curand_uniform(curandStateScrambledSobol32_t *state)
395
+ {
396
+ return _curand_uniform(curand(state));
397
+ }
398
+
399
+ /**
400
+ * \brief Return a uniformly distributed double from a scrambled Sobol32 generator.
401
+ *
402
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
403
+ * from the scrambled Sobol32 generator in \p state, increment position of generator.
404
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
405
+ * point outputs are never returned.
406
+ *
407
+ * The implementation is guaranteed to use a single call to \p curand()
408
+ * to preserve the quasirandom properties of the sequence.
409
+ *
410
+ * Note that the implementation uses only 32 random bits to generate a single double
411
+ * precision value.
412
+ *
413
+ * \param state - Pointer to state to update
414
+ *
415
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
416
+ */
417
+ QUALIFIERS double curand_uniform_double(curandStateScrambledSobol32_t *state)
418
+ {
419
+ return _curand_uniform_double(curand(state));
420
+ }
421
+ /**
422
+ * \brief Return a uniformly distributed float from a Sobol64 generator.
423
+ *
424
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
425
+ * from the Sobol64 generator in \p state, increment position of generator.
426
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
427
+ * point outputs are never returned.
428
+ *
429
+ * The implementation is guaranteed to use a single call to \p curand().
430
+ *
431
+ * \param state - Pointer to state to update
432
+ *
433
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
434
+ */
435
+ QUALIFIERS float curand_uniform(curandStateSobol64_t *state)
436
+ {
437
+ return _curand_uniform(curand(state));
438
+ }
439
+
440
+ /**
441
+ * \brief Return a uniformly distributed double from a Sobol64 generator.
442
+ *
443
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
444
+ * from the Sobol64 generator in \p state, increment position of generator.
445
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
446
+ * point outputs are never returned.
447
+ *
448
+ * The implementation is guaranteed to use a single call to \p curand()
449
+ * to preserve the quasirandom properties of the sequence.
450
+ *
451
+ * \param state - Pointer to state to update
452
+ *
453
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
454
+ */
455
+ QUALIFIERS double curand_uniform_double(curandStateSobol64_t *state)
456
+ {
457
+ return _curand_uniform_double(curand(state));
458
+ }
459
+ /**
460
+ * \brief Return a uniformly distributed float from a scrambled Sobol64 generator.
461
+ *
462
+ * Return a uniformly distributed float between \p 0.0f and \p 1.0f
463
+ * from the scrambled Sobol64 generator in \p state, increment position of generator.
464
+ * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating
465
+ * point outputs are never returned.
466
+ *
467
+ * The implementation is guaranteed to use a single call to \p curand().
468
+ *
469
+ * \param state - Pointer to state to update
470
+ *
471
+ * \return uniformly distributed float between \p 0.0f and \p 1.0f
472
+ */
473
+ QUALIFIERS float curand_uniform(curandStateScrambledSobol64_t *state)
474
+ {
475
+ return _curand_uniform(curand(state));
476
+ }
477
+
478
+ /**
479
+ * \brief Return a uniformly distributed double from a scrambled Sobol64 generator.
480
+ *
481
+ * Return a uniformly distributed double between \p 0.0 and \p 1.0
482
+ * from the scrambled Sobol64 generator in \p state, increment position of generator.
483
+ * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating
484
+ * point outputs are never returned.
485
+ *
486
+ * The implementation is guaranteed to use a single call to \p curand()
487
+ * to preserve the quasirandom properties of the sequence.
488
+ *
489
+ * \param state - Pointer to state to update
490
+ *
491
+ * \return uniformly distributed double between \p 0.0 and \p 1.0
492
+ */
493
+ QUALIFIERS double curand_uniform_double(curandStateScrambledSobol64_t *state)
494
+ {
495
+ return _curand_uniform_double(curand(state));
496
+ }
497
+
498
+ #endif // !defined(CURAND_UNIFORM_H_)
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/lib/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverDn.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverMg.h ADDED
@@ -0,0 +1,318 @@
1
+ /*
2
+ * Copyright 2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVERMG_H_)
51
+ #define CUSOLVERMG_H_
52
+
53
+ #include <stdint.h>
54
+ #include "cusolverDn.h"
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif /* __cplusplus */
59
+
60
+ struct cusolverMgContext;
61
+ typedef struct cusolverMgContext *cusolverMgHandle_t;
62
+
63
+ /**
64
+ * \brief This enum decides how 1D device Ids (or process ranks) get mapped to
65
+ * a 2D grid.
66
+ */
67
+ typedef enum {
68
+
69
+ CUDALIBMG_GRID_MAPPING_ROW_MAJOR = 1,
70
+ CUDALIBMG_GRID_MAPPING_COL_MAJOR = 0
71
+
72
+ } cusolverMgGridMapping_t;
73
+
74
+ /** \brief Opaque structure of the distributed grid */
75
+ typedef void *cudaLibMgGrid_t;
76
+ /** \brief Opaque structure of the distributed matrix descriptor */
77
+ typedef void *cudaLibMgMatrixDesc_t;
78
+
79
+ cusolverStatus_t CUSOLVERAPI cusolverMgCreate(cusolverMgHandle_t *handle);
80
+
81
+ cusolverStatus_t CUSOLVERAPI cusolverMgDestroy(cusolverMgHandle_t handle);
82
+
83
+ cusolverStatus_t CUSOLVERAPI cusolverMgDeviceSelect(
84
+ cusolverMgHandle_t handle,
85
+ int nbDevices,
86
+ int deviceId[]);
87
+
88
+ /**
89
+ * \brief Allocates resources related to the shared memory device grid.
90
+ * \param[out] grid the opaque data structure that holds the grid
91
+ * \param[in] numRowDevices number of devices in the row
92
+ * \param[in] numColDevices number of devices in the column
93
+ * \param[in] deviceId This array of size height * width stores the
94
+ * device-ids of the 2D grid; each entry must correspond to a valid GPU or to -1 (denoting CPU).
95
+ * \param[in] mapping whether the 2D grid is in row/column major
96
+ * \returns the status code
97
+ */
98
+ cusolverStatus_t CUSOLVERAPI cusolverMgCreateDeviceGrid(
99
+ cudaLibMgGrid_t * grid,
100
+ int32_t numRowDevices,
101
+ int32_t numColDevices,
102
+ const int32_t deviceId[],
103
+ cusolverMgGridMapping_t mapping);
104
+
105
+ /**
106
+ * \brief Releases the allocated resources related to the distributed grid.
107
+ * \param[in] grid the opaque data structure that holds the distributed grid
108
+ * \returns the status code
109
+ */
110
+ cusolverStatus_t CUSOLVERAPI cusolverMgDestroyGrid(cudaLibMgGrid_t grid);
111
+
112
+ /**
113
+ * \brief Allocates resources related to the distributed matrix descriptor.
114
+ * \param[out] desc the opaque data structure that holds the descriptor
115
+ * \param[in] numRows number of total rows
116
+ * \param[in] numCols number of total columns
117
+ * \param[in] rowBlockSize row block size
118
+ * \param[in] colBlockSize column block size
119
+ * \param[in] dataType the data type of each element in cudaDataType
120
+ * \param[in] grid the opaque data structure of the distributed grid
121
+ * \returns the status code
122
+ */
123
+ cusolverStatus_t CUSOLVERAPI cusolverMgCreateMatrixDesc(
124
+ cudaLibMgMatrixDesc_t *desc,
125
+ int64_t numRows,
126
+ int64_t numCols,
127
+ int64_t rowBlockSize,
128
+ int64_t colBlockSize,
129
+ cudaDataType dataType,
130
+ const cudaLibMgGrid_t grid);
131
+
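To show how the handle, device grid, and matrix descriptor declared above fit together, here is a hedged host-side sketch; the device count, matrix size, tile size, and data type are assumptions, and error handling is reduced to a single check macro.

#include <stdint.h>
#include <library_types.h>
#include <cusolverMg.h>

#define MG_CHECK(call) do { if ((call) != CUSOLVER_STATUS_SUCCESS) return -1; } while (0)

int mg_descriptor_setup_example(void)
{
    cusolverMgHandle_t handle;
    cudaLibMgGrid_t grid;
    cudaLibMgMatrixDesc_t descA;
    int devices[2] = {0, 1};               /* assumes two visible GPUs */
    const int64_t n = 4096, tile = 256;    /* illustrative sizes */

    MG_CHECK(cusolverMgCreate(&handle));
    MG_CHECK(cusolverMgDeviceSelect(handle, 2, devices));

    /* 1 x 2 device grid, column-major mapping of the device ids. */
    MG_CHECK(cusolverMgCreateDeviceGrid(&grid, 1, 2, devices,
                                        CUDALIBMG_GRID_MAPPING_COL_MAJOR));

    /* n x n double-precision matrix, distributed in tile x tile blocks. */
    MG_CHECK(cusolverMgCreateMatrixDesc(&descA, n, n, tile, tile,
                                        CUDA_R_64F, grid));

    /* ... distribute the matrix and call the Mg solvers here ... */

    MG_CHECK(cusolverMgDestroyMatrixDesc(descA));
    MG_CHECK(cusolverMgDestroyGrid(grid));
    MG_CHECK(cusolverMgDestroy(handle));
    return 0;
}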
132
+ /**
133
+ * \brief Releases the allocated resources related to the distributed matrix descriptor.
134
+ * \param[in] desc the opaque data structure that holds the descriptor
135
+ * \returns the status code
136
+ */
137
+ cusolverStatus_t CUSOLVERAPI
138
+ cusolverMgDestroyMatrixDesc(cudaLibMgMatrixDesc_t desc);
139
+
140
+ cusolverStatus_t CUSOLVERAPI cusolverMgSyevd_bufferSize(
141
+ cusolverMgHandle_t handle,
142
+ cusolverEigMode_t jobz,
143
+ cublasFillMode_t uplo,
144
+ int N,
145
+ void * array_d_A[],
146
+ int IA,
147
+ int JA,
148
+ cudaLibMgMatrixDesc_t descrA,
149
+ void * W,
150
+ cudaDataType dataTypeW,
151
+ cudaDataType computeType,
152
+ int64_t * lwork);
153
+
154
+ cusolverStatus_t CUSOLVERAPI cusolverMgSyevd(
155
+ cusolverMgHandle_t handle,
156
+ cusolverEigMode_t jobz,
157
+ cublasFillMode_t uplo,
158
+ int N,
159
+ void * array_d_A[],
160
+ int IA,
161
+ int JA,
162
+ cudaLibMgMatrixDesc_t descrA,
163
+ void * W,
164
+ cudaDataType dataTypeW,
165
+ cudaDataType computeType,
166
+ void * array_d_work[],
167
+ int64_t lwork,
168
+ int * info);
169
+
170
+ cusolverStatus_t CUSOLVERAPI cusolverMgGetrf_bufferSize(
171
+ cusolverMgHandle_t handle,
172
+ int M,
173
+ int N,
174
+ void * array_d_A[],
175
+ int IA,
176
+ int JA,
177
+ cudaLibMgMatrixDesc_t descrA,
178
+ int * array_d_IPIV[],
179
+ cudaDataType computeType,
180
+ int64_t * lwork);
181
+
182
+ cusolverStatus_t CUSOLVERAPI cusolverMgGetrf(
183
+ cusolverMgHandle_t handle,
184
+ int M,
185
+ int N,
186
+ void * array_d_A[],
187
+ int IA,
188
+ int JA,
189
+ cudaLibMgMatrixDesc_t descrA,
190
+ int * array_d_IPIV[],
191
+ cudaDataType computeType,
192
+ void * array_d_work[],
193
+ int64_t lwork,
194
+ int * info);
195
+
196
+ cusolverStatus_t CUSOLVERAPI cusolverMgGetrs_bufferSize(
197
+ cusolverMgHandle_t handle,
198
+ cublasOperation_t TRANS,
199
+ int N,
200
+ int NRHS,
201
+ void * array_d_A[],
202
+ int IA,
203
+ int JA,
204
+ cudaLibMgMatrixDesc_t descrA,
205
+ int * array_d_IPIV[],
206
+ void * array_d_B[],
207
+ int IB,
208
+ int JB,
209
+ cudaLibMgMatrixDesc_t descrB,
210
+ cudaDataType computeType,
211
+ int64_t * lwork);
212
+
213
+ cusolverStatus_t CUSOLVERAPI cusolverMgGetrs(
214
+ cusolverMgHandle_t handle,
215
+ cublasOperation_t TRANS,
216
+ int N,
217
+ int NRHS,
218
+ void * array_d_A[],
219
+ int IA,
220
+ int JA,
221
+ cudaLibMgMatrixDesc_t descrA,
222
+ int * array_d_IPIV[],
223
+ void * array_d_B[],
224
+ int IB,
225
+ int JB,
226
+ cudaLibMgMatrixDesc_t descrB,
227
+ cudaDataType computeType,
228
+ void * array_d_work[],
229
+ int64_t lwork,
230
+ int * info);
231
+
232
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotrf_bufferSize(
233
+ cusolverMgHandle_t handle,
234
+ cublasFillMode_t uplo,
235
+ int N,
236
+ void * array_d_A[],
237
+ int IA,
238
+ int JA,
239
+ cudaLibMgMatrixDesc_t descrA,
240
+ cudaDataType computeType,
241
+ int64_t * lwork);
242
+
243
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotrf(
244
+ cusolverMgHandle_t handle,
245
+ cublasFillMode_t uplo,
246
+ int N,
247
+ void * array_d_A[],
248
+ int IA,
249
+ int JA,
250
+ cudaLibMgMatrixDesc_t descrA,
251
+ cudaDataType computeType,
252
+ void * array_d_work[],
253
+ int64_t lwork,
254
+ int * h_info);
255
+
256
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotrs_bufferSize(
257
+ cusolverMgHandle_t handle,
258
+ cublasFillMode_t uplo,
259
+ int n,
260
+ int nrhs,
261
+ void * array_d_A[],
262
+ int IA,
263
+ int JA,
264
+ cudaLibMgMatrixDesc_t descrA,
265
+ void * array_d_B[],
266
+ int IB,
267
+ int JB,
268
+ cudaLibMgMatrixDesc_t descrB,
269
+ cudaDataType computeType,
270
+ int64_t * lwork);
271
+
272
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotrs(
273
+ cusolverMgHandle_t handle,
274
+ cublasFillMode_t uplo,
275
+ int n,
276
+ int nrhs,
277
+ void * array_d_A[],
278
+ int IA,
279
+ int JA,
280
+ cudaLibMgMatrixDesc_t descrA,
281
+ void * array_d_B[],
282
+ int IB,
283
+ int JB,
284
+ cudaLibMgMatrixDesc_t descrB,
285
+ cudaDataType computeType,
286
+ void * array_d_work[],
287
+ int64_t lwork,
288
+ int * h_info);
289
+
290
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotri_bufferSize(
291
+ cusolverMgHandle_t handle,
292
+ cublasFillMode_t uplo,
293
+ int N,
294
+ void * array_d_A[],
295
+ int IA,
296
+ int JA,
297
+ cudaLibMgMatrixDesc_t descrA,
298
+ cudaDataType computeType,
299
+ int64_t * lwork);
300
+
301
+ cusolverStatus_t CUSOLVERAPI cusolverMgPotri(
302
+ cusolverMgHandle_t handle,
303
+ cublasFillMode_t uplo,
304
+ int N,
305
+ void * array_d_A[],
306
+ int IA,
307
+ int JA,
308
+ cudaLibMgMatrixDesc_t descrA,
309
+ cudaDataType computeType,
310
+ void * array_d_work[],
311
+ int64_t lwork,
312
+ int * h_info);
313
+
314
+ #if defined(__cplusplus)
315
+ }
316
+ #endif /* __cplusplus */
317
+
318
+ #endif // CUSOLVERMG_H_
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverRf.h ADDED
@@ -0,0 +1,339 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVERRF_H_)
51
+ #define CUSOLVERRF_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h"
55
+ #include "cusolver_common.h"
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ /* CUSOLVERRF mode */
62
+ typedef enum {
63
+ CUSOLVERRF_RESET_VALUES_FAST_MODE_OFF = 0, // default
64
+ CUSOLVERRF_RESET_VALUES_FAST_MODE_ON = 1
65
+ } cusolverRfResetValuesFastMode_t;
66
+
67
+ /* CUSOLVERRF matrix format */
68
+ typedef enum {
69
+ CUSOLVERRF_MATRIX_FORMAT_CSR = 0, // default
70
+ CUSOLVERRF_MATRIX_FORMAT_CSC = 1
71
+ } cusolverRfMatrixFormat_t;
72
+
73
+ /* CUSOLVERRF unit diagonal */
74
+ typedef enum {
75
+ CUSOLVERRF_UNIT_DIAGONAL_STORED_L = 0, // default
76
+ CUSOLVERRF_UNIT_DIAGONAL_STORED_U = 1,
77
+ CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_L = 2,
78
+ CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_U = 3
79
+ } cusolverRfUnitDiagonal_t;
80
+
81
+ /* CUSOLVERRF factorization algorithm */
82
+ typedef enum {
83
+ CUSOLVERRF_FACTORIZATION_ALG0 = 0, // default
84
+ CUSOLVERRF_FACTORIZATION_ALG1 = 1,
85
+ CUSOLVERRF_FACTORIZATION_ALG2 = 2,
86
+ } cusolverRfFactorization_t;
87
+
88
+ /* CUSOLVERRF triangular solve algorithm */
89
+ typedef enum {
90
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG1 = 1, // default
91
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG2 = 2,
92
+ CUSOLVERRF_TRIANGULAR_SOLVE_ALG3 = 3
93
+ } cusolverRfTriangularSolve_t;
94
+
95
+ /* CUSOLVERRF numeric boost report */
96
+ typedef enum {
97
+ CUSOLVERRF_NUMERIC_BOOST_NOT_USED = 0, // default
98
+ CUSOLVERRF_NUMERIC_BOOST_USED = 1
99
+ } cusolverRfNumericBoostReport_t;
100
+
101
+ /* Opaque structure holding CUSOLVERRF library common */
102
+ struct cusolverRfCommon;
103
+ typedef struct cusolverRfCommon* cusolverRfHandle_t;
104
+
105
+ /* CUSOLVERRF create (allocate memory) and destroy (free memory) in the handle
106
+ */
107
+ cusolverStatus_t CUSOLVERAPI cusolverRfCreate(cusolverRfHandle_t* handle);
108
+ cusolverStatus_t CUSOLVERAPI cusolverRfDestroy(cusolverRfHandle_t handle);
109
+
110
+ /* CUSOLVERRF set and get input format */
111
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetMatrixFormat(
112
+ cusolverRfHandle_t handle,
113
+ cusolverRfMatrixFormat_t* format,
114
+ cusolverRfUnitDiagonal_t* diag);
115
+
116
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetMatrixFormat(
117
+ cusolverRfHandle_t handle,
118
+ cusolverRfMatrixFormat_t format,
119
+ cusolverRfUnitDiagonal_t diag);
120
+
121
+ /* CUSOLVERRF set and get numeric properties */
122
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetNumericProperties(
123
+ cusolverRfHandle_t handle,
124
+ double zero,
125
+ double boost);
126
+
127
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericProperties(
128
+ cusolverRfHandle_t handle,
129
+ double* zero,
130
+ double* boost);
131
+
132
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericBoostReport(
133
+ cusolverRfHandle_t handle,
134
+ cusolverRfNumericBoostReport_t* report);
135
+
136
+ /* CUSOLVERRF choose the triangular solve algorithm */
137
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetAlgs(
138
+ cusolverRfHandle_t handle,
139
+ cusolverRfFactorization_t factAlg,
140
+ cusolverRfTriangularSolve_t solveAlg);
141
+
142
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetAlgs(
143
+ cusolverRfHandle_t handle,
144
+ cusolverRfFactorization_t* factAlg,
145
+ cusolverRfTriangularSolve_t* solveAlg);
146
+
147
+ /* CUSOLVERRF set and get fast mode */
148
+ cusolverStatus_t CUSOLVERAPI cusolverRfGetResetValuesFastMode(
149
+ cusolverRfHandle_t handle,
150
+ cusolverRfResetValuesFastMode_t* fastMode);
151
+
152
+ cusolverStatus_t CUSOLVERAPI cusolverRfSetResetValuesFastMode(
153
+ cusolverRfHandle_t handle,
154
+ cusolverRfResetValuesFastMode_t fastMode);
155
+
156
+ /*** Non-Batched Routines ***/
157
+ /* CUSOLVERRF setup of internal structures from host or device memory */
158
+ cusolverStatus_t CUSOLVERAPI
159
+ cusolverRfSetupHost(/* Input (in the host memory) */
160
+ int n,
161
+ int nnzA,
162
+ int* h_csrRowPtrA,
163
+ int* h_csrColIndA,
164
+ double* h_csrValA,
165
+ int nnzL,
166
+ int* h_csrRowPtrL,
167
+ int* h_csrColIndL,
168
+ double* h_csrValL,
169
+ int nnzU,
170
+ int* h_csrRowPtrU,
171
+ int* h_csrColIndU,
172
+ double* h_csrValU,
173
+ int* h_P,
174
+ int* h_Q,
175
+ /* Output */
176
+ cusolverRfHandle_t handle);
177
+
178
+ cusolverStatus_t CUSOLVERAPI
179
+ cusolverRfSetupDevice(/* Input (in the device memory) */
180
+ int n,
181
+ int nnzA,
182
+ int* csrRowPtrA,
183
+ int* csrColIndA,
184
+ double* csrValA,
185
+ int nnzL,
186
+ int* csrRowPtrL,
187
+ int* csrColIndL,
188
+ double* csrValL,
189
+ int nnzU,
190
+ int* csrRowPtrU,
191
+ int* csrColIndU,
192
+ double* csrValU,
193
+ int* P,
194
+ int* Q,
195
+ /* Output */
196
+ cusolverRfHandle_t handle);
197
+
198
+ /* CUSOLVERRF update the matrix values (assuming the reordering, pivoting
199
+ and consequently the sparsity pattern of L and U did not change),
200
+ and zero out the remaining values. */
201
+ cusolverStatus_t CUSOLVERAPI
202
+ cusolverRfResetValues(/* Input (in the device memory) */
203
+ int n,
204
+ int nnzA,
205
+ int* csrRowPtrA,
206
+ int* csrColIndA,
207
+ double* csrValA,
208
+ int* P,
209
+ int* Q,
210
+ /* Output */
211
+ cusolverRfHandle_t handle);
212
+
213
+ /* CUSOLVERRF analysis (for parallelism) */
214
+ cusolverStatus_t CUSOLVERAPI cusolverRfAnalyze(cusolverRfHandle_t handle);
215
+
216
+ /* CUSOLVERRF re-factorization (for parallelism) */
217
+ cusolverStatus_t CUSOLVERAPI cusolverRfRefactor(cusolverRfHandle_t handle);
218
+
219
+ /* CUSOLVERRF extraction: Get L & U packed into a single matrix M */
220
+ cusolverStatus_t CUSOLVERAPI
221
+ cusolverRfAccessBundledFactorsDevice(/* Input */
222
+ cusolverRfHandle_t handle,
223
+ /* Output (in the host memory) */
224
+ int* nnzM,
225
+ /* Output (in the device memory) */
226
+ int** Mp,
227
+ int** Mi,
228
+ double** Mx);
229
+
230
+ cusolverStatus_t CUSOLVERAPI
231
+ cusolverRfExtractBundledFactorsHost(/* Input */
232
+ cusolverRfHandle_t handle,
233
+ /* Output (in the host memory) */
234
+ int* h_nnzM,
235
+ int** h_Mp,
236
+ int** h_Mi,
237
+ double** h_Mx);
238
+
239
+ /* CUSOLVERRF extraction: Get L & U individually */
240
+ cusolverStatus_t CUSOLVERAPI
241
+ cusolverRfExtractSplitFactorsHost(/* Input */
242
+ cusolverRfHandle_t handle,
243
+ /* Output (in the host memory) */
244
+ int* h_nnzL,
245
+ int** h_csrRowPtrL,
246
+ int** h_csrColIndL,
247
+ double** h_csrValL,
248
+ int* h_nnzU,
249
+ int** h_csrRowPtrU,
250
+ int** h_csrColIndU,
251
+ double** h_csrValU);
252
+
253
+ /* CUSOLVERRF (forward and backward triangular) solves */
254
+ cusolverStatus_t CUSOLVERAPI
255
+ cusolverRfSolve(/* Input (in the device memory) */
256
+ cusolverRfHandle_t handle,
257
+ int* P,
258
+ int* Q,
259
+ int nrhs, // only nrhs=1 is supported
260
+ double* Temp, // of size ldt*nrhs (ldt>=n)
261
+ int ldt,
262
+ /* Input/Output (in the device memory) */
263
+ double* XF,
264
+ /* Input */
265
+ int ldxf);
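The non-batched entry points above are meant to be chained in a fixed order. Below is a minimal sketch of that flow, assuming the caller already holds the pattern and values of A, an initial L/U factorization and the permutations P/Q on the host (h_ arrays), device copies of A, P, Q (d_ arrays), and a device workspace d_T of at least n doubles; all names, sizes and error checks are placeholders, so treat this as an illustration of the call sequence rather than a complete program.

    /* sketch only: h_ and d_ arrays are assumed to be prepared by the caller */
    cusolverRfHandle_t rf = NULL;
    cusolverRfCreate(&rf);                           /* allocate the handle               */

    cusolverRfSetupHost(n, nnzA, h_csrRowPtrA, h_csrColIndA, h_csrValA,
                        nnzL, h_csrRowPtrL, h_csrColIndL, h_csrValL,
                        nnzU, h_csrRowPtrU, h_csrColIndU, h_csrValU,
                        h_P, h_Q, rf);               /* copy A, L, U, P, Q into the handle */

    cusolverRfAnalyze(rf);                           /* one-time analysis for parallelism */

    /* for each new matrix that keeps the same sparsity pattern: */
    cusolverRfResetValues(n, nnzA, d_csrRowPtrA, d_csrColIndA, d_csrValA,
                          d_P, d_Q, rf);             /* reload the numerical values       */
    cusolverRfRefactor(rf);                          /* numerical re-factorization        */
    cusolverRfSolve(rf, d_P, d_Q, 1 /* nrhs: only 1 supported */,
                    d_T, n /* ldt >= n */,
                    d_x /* in: b, out: x */, n /* ldxf */);

    cusolverRfDestroy(rf);                           /* release the handle                */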
266
+
267
+ /*** Batched Routines ***/
268
+ /* CUSOLVERRF-batch setup of internal structures from host */
269
+ cusolverStatus_t CUSOLVERAPI
270
+ cusolverRfBatchSetupHost(/* Input (in the host memory)*/
271
+ int batchSize,
272
+ int n,
273
+ int nnzA,
274
+ int* h_csrRowPtrA,
275
+ int* h_csrColIndA,
276
+ double* h_csrValA_array[],
277
+ int nnzL,
278
+ int* h_csrRowPtrL,
279
+ int* h_csrColIndL,
280
+ double* h_csrValL,
281
+ int nnzU,
282
+ int* h_csrRowPtrU,
283
+ int* h_csrColIndU,
284
+ double* h_csrValU,
285
+ int* h_P,
286
+ int* h_Q,
287
+ /* Output (in the device memory) */
288
+ cusolverRfHandle_t handle);
289
+
290
+ /* CUSOLVERRF-batch update the matrix values (assuming the reordering,
291
+ pivoting and consequently the sparsity pattern of L and U did not change),
292
+ and zero out the remaining values. */
293
+ cusolverStatus_t CUSOLVERAPI
294
+ cusolverRfBatchResetValues(/* Input (in the device memory) */
295
+ int batchSize,
296
+ int n,
297
+ int nnzA,
298
+ int* csrRowPtrA,
299
+ int* csrColIndA,
300
+ double* csrValA_array[],
301
+ int* P,
302
+ int* Q,
303
+ /* Output */
304
+ cusolverRfHandle_t handle);
305
+
306
+ /* CUSOLVERRF-batch analysis (for parallelism) */
307
+ cusolverStatus_t CUSOLVERAPI
308
+ cusolverRfBatchAnalyze(cusolverRfHandle_t handle);
309
+
310
+ /* CUSOLVERRF-batch re-factorization (for parallelism) */
311
+ cusolverStatus_t CUSOLVERAPI
312
+ cusolverRfBatchRefactor(cusolverRfHandle_t handle);
313
+
314
+ /* CUSOLVERRF-batch (forward and backward triangular) solves */
315
+ cusolverStatus_t CUSOLVERAPI
316
+ cusolverRfBatchSolve(/* Input (in the device memory) */
317
+ cusolverRfHandle_t handle,
318
+ int* P,
319
+ int* Q,
320
+ int nrhs, // only nrhs=1 is supported
321
+ double* Temp, // of size 2*batchSize*(n*nrhs)
322
+ int ldt, // only ldt=n is supported
323
+ /* Input/Output (in the device memory) */
324
+ double* XF_array[],
325
+ /* Input */
326
+ int ldxf);
327
+
328
+ /* CUSOLVERRF-batch obtain the position of zero pivot */
329
+ cusolverStatus_t CUSOLVERAPI
330
+ cusolverRfBatchZeroPivot(/* Input */
331
+ cusolverRfHandle_t handle,
332
+ /* Output (in the host memory) */
333
+ int* position);
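The batched routines follow the same shape, except that the batchSize systems must share one sparsity pattern and one P/Q, the values are passed as arrays of per-system device pointers, and a zero-pivot query is available after re-factorization. A condensed sketch under the same placeholder assumptions as above (h_csrValA_array, d_csrValA_array, d_XF_array, d_Temp and batchSize are caller-provided):

    cusolverRfHandle_t rf = NULL;
    cusolverRfCreate(&rf);
    cusolverRfBatchSetupHost(batchSize, n, nnzA,
                             h_csrRowPtrA, h_csrColIndA, h_csrValA_array,
                             nnzL, h_csrRowPtrL, h_csrColIndL, h_csrValL,
                             nnzU, h_csrRowPtrU, h_csrColIndU, h_csrValU,
                             h_P, h_Q, rf);
    cusolverRfBatchAnalyze(rf);

    cusolverRfBatchResetValues(batchSize, n, nnzA,
                               d_csrRowPtrA, d_csrColIndA, d_csrValA_array,
                               d_P, d_Q, rf);
    cusolverRfBatchRefactor(rf);

    /* assumed: one host entry per system, reporting where a zero pivot occurred */
    int *h_position = (int *)malloc(sizeof(int) * batchSize);
    cusolverRfBatchZeroPivot(rf, h_position);

    /* d_Temp must hold 2*batchSize*(n*nrhs) doubles; only nrhs = 1 and ldt = n are supported */
    cusolverRfBatchSolve(rf, d_P, d_Q, 1, d_Temp, n, d_XF_array, n);

    free(h_position);
    cusolverRfDestroy(rf);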
334
+
335
+ #if defined(__cplusplus)
336
+ }
337
+ #endif /* __cplusplus */
338
+
339
+ #endif /* CUSOLVERRF_H_ */
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverSp.h ADDED
@@ -0,0 +1,923 @@
1
+ /*
2
+ * Copyright 2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVERSP_H_)
51
+ #define CUSOLVERSP_H_
52
+
53
+ #include "cusparse.h"
54
+ #include "cublas_v2.h"
55
+ #include "cusolver_common.h"
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ struct cusolverSpContext;
62
+ typedef struct cusolverSpContext *cusolverSpHandle_t;
63
+
64
+ struct csrqrInfo;
65
+ typedef struct csrqrInfo *csrqrInfo_t;
66
+
67
+ cusolverStatus_t CUSOLVERAPI cusolverSpCreate(cusolverSpHandle_t *handle);
68
+ cusolverStatus_t CUSOLVERAPI cusolverSpDestroy(cusolverSpHandle_t handle);
69
+ cusolverStatus_t CUSOLVERAPI
70
+ cusolverSpSetStream(cusolverSpHandle_t handle, cudaStream_t streamId);
71
+ cusolverStatus_t CUSOLVERAPI
72
+ cusolverSpGetStream(cusolverSpHandle_t handle, cudaStream_t *streamId);
73
+
74
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrissymHost(
75
+ cusolverSpHandle_t handle,
76
+ int m,
77
+ int nnzA,
78
+ const cusparseMatDescr_t descrA,
79
+ const int * csrRowPtrA,
80
+ const int * csrEndPtrA,
81
+ const int * csrColIndA,
82
+ int * issym);
83
+
84
+ /* -------- GPU linear solver by LU factorization
85
+ * solve A*x = b, A can be singular
86
+ * [ls] stands for linear solve
87
+ * [v] stands for vector
88
+ * [lu] stands for LU factorization
89
+ */
90
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvluHost(
91
+ cusolverSpHandle_t handle,
92
+ int n,
93
+ int nnzA,
94
+ const cusparseMatDescr_t descrA,
95
+ const float * csrValA,
96
+ const int * csrRowPtrA,
97
+ const int * csrColIndA,
98
+ const float * b,
99
+ float tol,
100
+ int reorder,
101
+ float * x,
102
+ int * singularity);
103
+
104
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvluHost(
105
+ cusolverSpHandle_t handle,
106
+ int n,
107
+ int nnzA,
108
+ const cusparseMatDescr_t descrA,
109
+ const double * csrValA,
110
+ const int * csrRowPtrA,
111
+ const int * csrColIndA,
112
+ const double * b,
113
+ double tol,
114
+ int reorder,
115
+ double * x,
116
+ int * singularity);
117
+
118
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvluHost(
119
+ cusolverSpHandle_t handle,
120
+ int n,
121
+ int nnzA,
122
+ const cusparseMatDescr_t descrA,
123
+ const cuComplex * csrValA,
124
+ const int * csrRowPtrA,
125
+ const int * csrColIndA,
126
+ const cuComplex * b,
127
+ float tol,
128
+ int reorder,
129
+ cuComplex * x,
130
+ int * singularity);
131
+
132
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvluHost(
133
+ cusolverSpHandle_t handle,
134
+ int n,
135
+ int nnzA,
136
+ const cusparseMatDescr_t descrA,
137
+ const cuDoubleComplex * csrValA,
138
+ const int * csrRowPtrA,
139
+ const int * csrColIndA,
140
+ const cuDoubleComplex * b,
141
+ double tol,
142
+ int reorder,
143
+ cuDoubleComplex * x,
144
+ int * singularity);
145
+
146
+ /* -------- GPU linear solver by QR factorization
147
+ * solve A*x = b, A can be singular
148
+ * [ls] stands for linear solve
149
+ * [v] stands for vector
150
+ * [qr] stands for QR factorization
151
+ */
152
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvqr(
153
+ cusolverSpHandle_t handle,
154
+ int m,
155
+ int nnz,
156
+ const cusparseMatDescr_t descrA,
157
+ const float * csrVal,
158
+ const int * csrRowPtr,
159
+ const int * csrColInd,
160
+ const float * b,
161
+ float tol,
162
+ int reorder,
163
+ float * x,
164
+ int * singularity);
165
+
166
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvqr(
167
+ cusolverSpHandle_t handle,
168
+ int m,
169
+ int nnz,
170
+ const cusparseMatDescr_t descrA,
171
+ const double * csrVal,
172
+ const int * csrRowPtr,
173
+ const int * csrColInd,
174
+ const double * b,
175
+ double tol,
176
+ int reorder,
177
+ double * x,
178
+ int * singularity);
179
+
180
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvqr(
181
+ cusolverSpHandle_t handle,
182
+ int m,
183
+ int nnz,
184
+ const cusparseMatDescr_t descrA,
185
+ const cuComplex * csrVal,
186
+ const int * csrRowPtr,
187
+ const int * csrColInd,
188
+ const cuComplex * b,
189
+ float tol,
190
+ int reorder,
191
+ cuComplex * x,
192
+ int * singularity);
193
+
194
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvqr(
195
+ cusolverSpHandle_t handle,
196
+ int m,
197
+ int nnz,
198
+ const cusparseMatDescr_t descrA,
199
+ const cuDoubleComplex * csrVal,
200
+ const int * csrRowPtr,
201
+ const int * csrColInd,
202
+ const cuDoubleComplex * b,
203
+ double tol,
204
+ int reorder,
205
+ cuDoubleComplex * x,
206
+ int * singularity);
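Each csrlsv* routine is a one-call solver: it factorizes A and solves A*x = b in a single invocation, reporting a numerically singular pivot through *singularity. A minimal double-precision sketch on the GPU, with d_ device arrays assumed allocated and filled, error checking omitted, and the usual legacy cuSPARSE descriptor setup:

    cusolverSpHandle_t sp = NULL;
    cusparseMatDescr_t descrA = NULL;
    cusolverSpCreate(&sp);
    cusparseCreateMatDescr(&descrA);
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);

    int singularity = -1;                  /* stays -1 when no singular pivot was met */
    cusolverSpDcsrlsvqr(sp, m, nnz, descrA,
                        d_csrVal, d_csrRowPtr, d_csrColInd,
                        d_b,
                        1e-12,             /* tol for the singularity test            */
                        0,                 /* reorder: 0 = keep the given ordering    */
                        d_x, &singularity);

    cusparseDestroyMatDescr(descrA);
    cusolverSpDestroy(sp);

The other csrlsv* variants declared in this header take the same argument list; the Host-suffixed versions expect all arrays in host memory instead.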
207
+
208
+ /* -------- CPU linear solver by QR factorization
209
+ * solve A*x = b, A can be singular
210
+ * [ls] stands for linear solve
211
+ * [v] stands for vector
212
+ * [qr] stands for QR factorization
213
+ */
214
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvqrHost(
215
+ cusolverSpHandle_t handle,
216
+ int m,
217
+ int nnz,
218
+ const cusparseMatDescr_t descrA,
219
+ const float * csrValA,
220
+ const int * csrRowPtrA,
221
+ const int * csrColIndA,
222
+ const float * b,
223
+ float tol,
224
+ int reorder,
225
+ float * x,
226
+ int * singularity);
227
+
228
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvqrHost(
229
+ cusolverSpHandle_t handle,
230
+ int m,
231
+ int nnz,
232
+ const cusparseMatDescr_t descrA,
233
+ const double * csrValA,
234
+ const int * csrRowPtrA,
235
+ const int * csrColIndA,
236
+ const double * b,
237
+ double tol,
238
+ int reorder,
239
+ double * x,
240
+ int * singularity);
241
+
242
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvqrHost(
243
+ cusolverSpHandle_t handle,
244
+ int m,
245
+ int nnz,
246
+ const cusparseMatDescr_t descrA,
247
+ const cuComplex * csrValA,
248
+ const int * csrRowPtrA,
249
+ const int * csrColIndA,
250
+ const cuComplex * b,
251
+ float tol,
252
+ int reorder,
253
+ cuComplex * x,
254
+ int * singularity);
255
+
256
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvqrHost(
257
+ cusolverSpHandle_t handle,
258
+ int m,
259
+ int nnz,
260
+ const cusparseMatDescr_t descrA,
261
+ const cuDoubleComplex * csrValA,
262
+ const int * csrRowPtrA,
263
+ const int * csrColIndA,
264
+ const cuDoubleComplex * b,
265
+ double tol,
266
+ int reorder,
267
+ cuDoubleComplex * x,
268
+ int * singularity);
269
+
270
+ /* -------- CPU linear solver by Cholesky factorization
271
+ * solve A*x = b, A can be singular
272
+ * [ls] stands for linear solve
273
+ * [v] stands for vector
274
+ * [chol] stands for Cholesky factorization
275
+ *
276
+ * Only works for symmetric positive definite matrix.
277
+ * The upper part of A is ignored.
278
+ */
279
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvcholHost(
280
+ cusolverSpHandle_t handle,
281
+ int m,
282
+ int nnz,
283
+ const cusparseMatDescr_t descrA,
284
+ const float * csrVal,
285
+ const int * csrRowPtr,
286
+ const int * csrColInd,
287
+ const float * b,
288
+ float tol,
289
+ int reorder,
290
+ float * x,
291
+ int * singularity);
292
+
293
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvcholHost(
294
+ cusolverSpHandle_t handle,
295
+ int m,
296
+ int nnz,
297
+ const cusparseMatDescr_t descrA,
298
+ const double * csrVal,
299
+ const int * csrRowPtr,
300
+ const int * csrColInd,
301
+ const double * b,
302
+ double tol,
303
+ int reorder,
304
+ double * x,
305
+ int * singularity);
306
+
307
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvcholHost(
308
+ cusolverSpHandle_t handle,
309
+ int m,
310
+ int nnz,
311
+ const cusparseMatDescr_t descrA,
312
+ const cuComplex * csrVal,
313
+ const int * csrRowPtr,
314
+ const int * csrColInd,
315
+ const cuComplex * b,
316
+ float tol,
317
+ int reorder,
318
+ cuComplex * x,
319
+ int * singularity);
320
+
321
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvcholHost(
322
+ cusolverSpHandle_t handle,
323
+ int m,
324
+ int nnz,
325
+ const cusparseMatDescr_t descrA,
326
+ const cuDoubleComplex * csrVal,
327
+ const int * csrRowPtr,
328
+ const int * csrColInd,
329
+ const cuDoubleComplex * b,
330
+ double tol,
331
+ int reorder,
332
+ cuDoubleComplex * x,
333
+ int * singularity);
334
+
335
+ /* -------- GPU linear solver by Cholesky factorization
336
+ * solve A*x = b, A can be singular
337
+ * [ls] stands for linear solve
338
+ * [v] stands for vector
339
+ * [chol] stands for Cholesky factorization
340
+ *
341
+ * Only works for symmetric positive definite matrix.
342
+ * The upper part of A is ignored.
343
+ */
344
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvchol(
345
+ cusolverSpHandle_t handle,
346
+ int m,
347
+ int nnz,
348
+ const cusparseMatDescr_t descrA,
349
+ const float * csrVal,
350
+ const int * csrRowPtr,
351
+ const int * csrColInd,
352
+ const float * b,
353
+ float tol,
354
+ int reorder,
355
+ // output
356
+ float *x,
357
+ int * singularity);
358
+
359
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvchol(
360
+ cusolverSpHandle_t handle,
361
+ int m,
362
+ int nnz,
363
+ const cusparseMatDescr_t descrA,
364
+ const double * csrVal,
365
+ const int * csrRowPtr,
366
+ const int * csrColInd,
367
+ const double * b,
368
+ double tol,
369
+ int reorder,
370
+ // output
371
+ double *x,
372
+ int * singularity);
373
+
374
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvchol(
375
+ cusolverSpHandle_t handle,
376
+ int m,
377
+ int nnz,
378
+ const cusparseMatDescr_t descrA,
379
+ const cuComplex * csrVal,
380
+ const int * csrRowPtr,
381
+ const int * csrColInd,
382
+ const cuComplex * b,
383
+ float tol,
384
+ int reorder,
385
+ // output
386
+ cuComplex *x,
387
+ int * singularity);
388
+
389
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvchol(
390
+ cusolverSpHandle_t handle,
391
+ int m,
392
+ int nnz,
393
+ const cusparseMatDescr_t descrA,
394
+ const cuDoubleComplex * csrVal,
395
+ const int * csrRowPtr,
396
+ const int * csrColInd,
397
+ const cuDoubleComplex * b,
398
+ double tol,
399
+ int reorder,
400
+ // output
401
+ cuDoubleComplex *x,
402
+ int * singularity);
403
+
404
+ /* ----------- CPU least square solver by QR factorization
405
+ * solve min|b - A*x|
406
+ * [lsq] stands for least square
407
+ * [v] stands for vector
408
+ * [qr] stands for QR factorization
409
+ */
410
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsqvqrHost(
411
+ cusolverSpHandle_t handle,
412
+ int m,
413
+ int n,
414
+ int nnz,
415
+ const cusparseMatDescr_t descrA,
416
+ const float * csrValA,
417
+ const int * csrRowPtrA,
418
+ const int * csrColIndA,
419
+ const float * b,
420
+ float tol,
421
+ int * rankA,
422
+ float * x,
423
+ int * p,
424
+ float * min_norm);
425
+
426
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsqvqrHost(
427
+ cusolverSpHandle_t handle,
428
+ int m,
429
+ int n,
430
+ int nnz,
431
+ const cusparseMatDescr_t descrA,
432
+ const double * csrValA,
433
+ const int * csrRowPtrA,
434
+ const int * csrColIndA,
435
+ const double * b,
436
+ double tol,
437
+ int * rankA,
438
+ double * x,
439
+ int * p,
440
+ double * min_norm);
441
+
442
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsqvqrHost(
443
+ cusolverSpHandle_t handle,
444
+ int m,
445
+ int n,
446
+ int nnz,
447
+ const cusparseMatDescr_t descrA,
448
+ const cuComplex * csrValA,
449
+ const int * csrRowPtrA,
450
+ const int * csrColIndA,
451
+ const cuComplex * b,
452
+ float tol,
453
+ int * rankA,
454
+ cuComplex * x,
455
+ int * p,
456
+ float * min_norm);
457
+
458
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsqvqrHost(
459
+ cusolverSpHandle_t handle,
460
+ int m,
461
+ int n,
462
+ int nnz,
463
+ const cusparseMatDescr_t descrA,
464
+ const cuDoubleComplex * csrValA,
465
+ const int * csrRowPtrA,
466
+ const int * csrColIndA,
467
+ const cuDoubleComplex * b,
468
+ double tol,
469
+ int * rankA,
470
+ cuDoubleComplex * x,
471
+ int * p,
472
+ double * min_norm);
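csrlsqvqr solves the least-squares problem min |b - A*x| for a possibly rank-deficient rectangular A, returning the numerical rank, a permutation and the residual norm. A double-precision host sketch (h_ arrays and sizes assumed prepared; tol is the threshold used for the rank decision):

    int    rankA    = 0;
    double min_norm = 0.0;
    int   *h_perm   = (int *)malloc(sizeof(int) * n);   /* permutation output           */

    cusolverSpDcsrlsqvqrHost(sp, m, n, nnz, descrA,
                             h_csrValA, h_csrRowPtrA, h_csrColIndA,
                             h_b,
                             1e-12,          /* tol                          */
                             &rankA,         /* numerical rank of A          */
                             h_x,            /* solution, length n           */
                             h_perm,
                             &min_norm);     /* |b - A*x| at the solution    */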
473
+
474
+ /* --------- CPU eigenvalue solver by shift inverse
475
+ * solve A*x = lambda * x
476
+ * where lambda is the eigenvalue nearest mu0.
477
+ * [eig] stands for eigenvalue solver
478
+ * [si] stands for shift-inverse
479
+ */
480
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsreigvsiHost(
481
+ cusolverSpHandle_t handle,
482
+ int m,
483
+ int nnz,
484
+ const cusparseMatDescr_t descrA,
485
+ const float * csrValA,
486
+ const int * csrRowPtrA,
487
+ const int * csrColIndA,
488
+ float mu0,
489
+ const float * x0,
490
+ int maxite,
491
+ float tol,
492
+ float * mu,
493
+ float * x);
494
+
495
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigvsiHost(
496
+ cusolverSpHandle_t handle,
497
+ int m,
498
+ int nnz,
499
+ const cusparseMatDescr_t descrA,
500
+ const double * csrValA,
501
+ const int * csrRowPtrA,
502
+ const int * csrColIndA,
503
+ double mu0,
504
+ const double * x0,
505
+ int maxite,
506
+ double tol,
507
+ double * mu,
508
+ double * x);
509
+
510
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigvsiHost(
511
+ cusolverSpHandle_t handle,
512
+ int m,
513
+ int nnz,
514
+ const cusparseMatDescr_t descrA,
515
+ const cuComplex * csrValA,
516
+ const int * csrRowPtrA,
517
+ const int * csrColIndA,
518
+ cuComplex mu0,
519
+ const cuComplex * x0,
520
+ int maxite,
521
+ float tol,
522
+ cuComplex * mu,
523
+ cuComplex * x);
524
+
525
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigvsiHost(
526
+ cusolverSpHandle_t handle,
527
+ int m,
528
+ int nnz,
529
+ const cusparseMatDescr_t descrA,
530
+ const cuDoubleComplex * csrValA,
531
+ const int * csrRowPtrA,
532
+ const int * csrColIndA,
533
+ cuDoubleComplex mu0,
534
+ const cuDoubleComplex * x0,
535
+ int maxite,
536
+ double tol,
537
+ cuDoubleComplex * mu,
538
+ cuDoubleComplex * x);
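csreigvsi runs shift-inverse iteration around the user-supplied shift mu0 and returns the eigenpair nearest to it. A host-side double-precision sketch (h_x0 is an initial guess of length m; maxite and tol bound the iteration; all names are placeholders):

    double mu = 0.0;                         /* eigenvalue nearest mu0    */
    cusolverSpDcsreigvsiHost(sp, m, nnz, descrA,
                             h_csrValA, h_csrRowPtrA, h_csrColIndA,
                             mu0,            /* shift                     */
                             h_x0,           /* initial eigenvector guess */
                             1000,           /* maxite                    */
                             1e-10,          /* tol                       */
                             &mu,
                             h_x);           /* eigenvector, length m     */

The device versions that follow take the same argument list with device-resident arrays and an eps tolerance in place of tol.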
539
+
540
+ /* --------- GPU eigenvalue solver by shift inverse
541
+ * solve A*x = lambda * x
542
+ * where lambda is the eigenvalue nearest mu0.
543
+ * [eig] stands for eigenvalue solver
544
+ * [si] stands for shift-inverse
545
+ */
546
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsreigvsi(
547
+ cusolverSpHandle_t handle,
548
+ int m,
549
+ int nnz,
550
+ const cusparseMatDescr_t descrA,
551
+ const float * csrValA,
552
+ const int * csrRowPtrA,
553
+ const int * csrColIndA,
554
+ float mu0,
555
+ const float * x0,
556
+ int maxite,
557
+ float eps,
558
+ float * mu,
559
+ float * x);
560
+
561
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigvsi(
562
+ cusolverSpHandle_t handle,
563
+ int m,
564
+ int nnz,
565
+ const cusparseMatDescr_t descrA,
566
+ const double * csrValA,
567
+ const int * csrRowPtrA,
568
+ const int * csrColIndA,
569
+ double mu0,
570
+ const double * x0,
571
+ int maxite,
572
+ double eps,
573
+ double * mu,
574
+ double * x);
575
+
576
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigvsi(
577
+ cusolverSpHandle_t handle,
578
+ int m,
579
+ int nnz,
580
+ const cusparseMatDescr_t descrA,
581
+ const cuComplex * csrValA,
582
+ const int * csrRowPtrA,
583
+ const int * csrColIndA,
584
+ cuComplex mu0,
585
+ const cuComplex * x0,
586
+ int maxite,
587
+ float eps,
588
+ cuComplex * mu,
589
+ cuComplex * x);
590
+
591
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigvsi(
592
+ cusolverSpHandle_t handle,
593
+ int m,
594
+ int nnz,
595
+ const cusparseMatDescr_t descrA,
596
+ const cuDoubleComplex * csrValA,
597
+ const int * csrRowPtrA,
598
+ const int * csrColIndA,
599
+ cuDoubleComplex mu0,
600
+ const cuDoubleComplex * x0,
601
+ int maxite,
602
+ double eps,
603
+ cuDoubleComplex * mu,
604
+ cuDoubleComplex * x);
605
+
606
+ // ----------- enclosed eigenvalues
607
+
608
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsreigsHost(
609
+ cusolverSpHandle_t handle,
610
+ int m,
611
+ int nnz,
612
+ const cusparseMatDescr_t descrA,
613
+ const float * csrValA,
614
+ const int * csrRowPtrA,
615
+ const int * csrColIndA,
616
+ cuComplex left_bottom_corner,
617
+ cuComplex right_upper_corner,
618
+ int * num_eigs);
619
+
620
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigsHost(
621
+ cusolverSpHandle_t handle,
622
+ int m,
623
+ int nnz,
624
+ const cusparseMatDescr_t descrA,
625
+ const double * csrValA,
626
+ const int * csrRowPtrA,
627
+ const int * csrColIndA,
628
+ cuDoubleComplex left_bottom_corner,
629
+ cuDoubleComplex right_upper_corner,
630
+ int * num_eigs);
631
+
632
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigsHost(
633
+ cusolverSpHandle_t handle,
634
+ int m,
635
+ int nnz,
636
+ const cusparseMatDescr_t descrA,
637
+ const cuComplex * csrValA,
638
+ const int * csrRowPtrA,
639
+ const int * csrColIndA,
640
+ cuComplex left_bottom_corner,
641
+ cuComplex right_upper_corner,
642
+ int * num_eigs);
643
+
644
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigsHost(
645
+ cusolverSpHandle_t handle,
646
+ int m,
647
+ int nnz,
648
+ const cusparseMatDescr_t descrA,
649
+ const cuDoubleComplex * csrValA,
650
+ const int * csrRowPtrA,
651
+ const int * csrColIndA,
652
+ cuDoubleComplex left_bottom_corner,
653
+ cuDoubleComplex right_upper_corner,
654
+ int * num_eigs);
655
+
656
+ /* --------- CPU symrcm
657
+ * Symmetric reverse Cuthill McKee permutation
658
+ *
659
+ */
660
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymrcmHost(
661
+ cusolverSpHandle_t handle,
662
+ int n,
663
+ int nnzA,
664
+ const cusparseMatDescr_t descrA,
665
+ const int * csrRowPtrA,
666
+ const int * csrColIndA,
667
+ int * p);
668
+
669
+ /* --------- CPU symmdq
670
+ * Symmetric minimum degree algorithm by quotient graph
671
+ *
672
+ */
673
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymmdqHost(
674
+ cusolverSpHandle_t handle,
675
+ int n,
676
+ int nnzA,
677
+ const cusparseMatDescr_t descrA,
678
+ const int * csrRowPtrA,
679
+ const int * csrColIndA,
680
+ int * p);
681
+
682
+ /* --------- CPU symamd
683
+ * Symmetric Approximate minimum degree algorithm by quotient graph
684
+ *
685
+ */
686
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymamdHost(
687
+ cusolverSpHandle_t handle,
688
+ int n,
689
+ int nnzA,
690
+ const cusparseMatDescr_t descrA,
691
+ const int * csrRowPtrA,
692
+ const int * csrColIndA,
693
+ int * p);
694
+
695
+ /* --------- CPU metis
696
+ * symmetric reordering
697
+ */
698
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrmetisndHost(
699
+ cusolverSpHandle_t handle,
700
+ int n,
701
+ int nnzA,
702
+ const cusparseMatDescr_t descrA,
703
+ const int * csrRowPtrA,
704
+ const int * csrColIndA,
705
+ const int64_t * options,
706
+ int * p);
707
+
708
+ /* --------- CPU zfd
709
+ * Zero free diagonal reordering
710
+ */
711
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrzfdHost(
712
+ cusolverSpHandle_t handle,
713
+ int n,
714
+ int nnz,
715
+ const cusparseMatDescr_t descrA,
716
+ const float * csrValA,
717
+ const int * csrRowPtrA,
718
+ const int * csrColIndA,
719
+ int * P,
720
+ int * numnz);
721
+
722
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrzfdHost(
723
+ cusolverSpHandle_t handle,
724
+ int n,
725
+ int nnz,
726
+ const cusparseMatDescr_t descrA,
727
+ const double * csrValA,
728
+ const int * csrRowPtrA,
729
+ const int * csrColIndA,
730
+ int * P,
731
+ int * numnz);
732
+
733
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrzfdHost(
734
+ cusolverSpHandle_t handle,
735
+ int n,
736
+ int nnz,
737
+ const cusparseMatDescr_t descrA,
738
+ const cuComplex * csrValA,
739
+ const int * csrRowPtrA,
740
+ const int * csrColIndA,
741
+ int * P,
742
+ int * numnz);
743
+
744
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrzfdHost(
745
+ cusolverSpHandle_t handle,
746
+ int n,
747
+ int nnz,
748
+ const cusparseMatDescr_t descrA,
749
+ const cuDoubleComplex * csrValA,
750
+ const int * csrRowPtrA,
751
+ const int * csrColIndA,
752
+ int * P,
753
+ int * numnz);
754
+
755
+ /* --------- CPU permutation
756
+ * P*A*Q^T
757
+ *
758
+ */
759
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrperm_bufferSizeHost(
760
+ cusolverSpHandle_t handle,
761
+ int m,
762
+ int n,
763
+ int nnzA,
764
+ const cusparseMatDescr_t descrA,
765
+ const int * csrRowPtrA,
766
+ const int * csrColIndA,
767
+ const int * p,
768
+ const int * q,
769
+ size_t * bufferSizeInBytes);
770
+
771
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrpermHost(
772
+ cusolverSpHandle_t handle,
773
+ int m,
774
+ int n,
775
+ int nnzA,
776
+ const cusparseMatDescr_t descrA,
777
+ int * csrRowPtrA,
778
+ int * csrColIndA,
779
+ const int * p,
780
+ const int * q,
781
+ int * map,
782
+ void * pBuffer);
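The reordering routines above (symrcm, symmdq, symamd, metisnd, zfd) only compute a permutation vector p; applying it to the matrix is a separate two-step call into Xcsrperm, which needs a caller-allocated buffer and permutes a copy of A's pattern in place. A host sketch for a symmetric reordering B = A(p,p); map is assumed to require identity initialization so that, on return, map[k] gives the original position of the k-th nonzero, letting the caller permute csrVal separately (Xcsrperm touches only the pattern):

    /* 1. fill-reducing ordering of A's pattern */
    int *h_p = (int *)malloc(sizeof(int) * n);
    cusolverSpXcsrsymrcmHost(sp, n, nnzA, descrA,
                             h_csrRowPtrA, h_csrColIndA, h_p);

    /* 2. h_csrRowPtrB / h_csrColIndB start as copies of A's pattern */
    size_t bufferSize = 0;
    cusolverSpXcsrperm_bufferSizeHost(sp, n, n, nnzA, descrA,
                                      h_csrRowPtrB, h_csrColIndB,
                                      h_p, h_p, &bufferSize);
    void *buffer = malloc(bufferSize);

    /* 3. permute the pattern in place and record where each nonzero came from */
    int *h_map = (int *)malloc(sizeof(int) * nnzA);
    for (int k = 0; k < nnzA; ++k) h_map[k] = k;
    cusolverSpXcsrpermHost(sp, n, n, nnzA, descrA,
                           h_csrRowPtrB, h_csrColIndB,
                           h_p, h_p, h_map, buffer);
    /* afterwards: csrValB[k] = csrValA[h_map[k]] */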
783
+
784
+ /*
785
+ * Low-level API: Batched QR
786
+ *
787
+ */
788
+
789
+ cusolverStatus_t CUSOLVERAPI cusolverSpCreateCsrqrInfo(csrqrInfo_t *info);
790
+
791
+ cusolverStatus_t CUSOLVERAPI cusolverSpDestroyCsrqrInfo(csrqrInfo_t info);
792
+
793
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysisBatched(
794
+ cusolverSpHandle_t handle,
795
+ int m,
796
+ int n,
797
+ int nnzA,
798
+ const cusparseMatDescr_t descrA,
799
+ const int * csrRowPtrA,
800
+ const int * csrColIndA,
801
+ csrqrInfo_t info);
802
+
803
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfoBatched(
804
+ cusolverSpHandle_t handle,
805
+ int m,
806
+ int n,
807
+ int nnz,
808
+ const cusparseMatDescr_t descrA,
809
+ const float * csrVal,
810
+ const int * csrRowPtr,
811
+ const int * csrColInd,
812
+ int batchSize,
813
+ csrqrInfo_t info,
814
+ size_t * internalDataInBytes,
815
+ size_t * workspaceInBytes);
816
+
817
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfoBatched(
818
+ cusolverSpHandle_t handle,
819
+ int m,
820
+ int n,
821
+ int nnz,
822
+ const cusparseMatDescr_t descrA,
823
+ const double * csrVal,
824
+ const int * csrRowPtr,
825
+ const int * csrColInd,
826
+ int batchSize,
827
+ csrqrInfo_t info,
828
+ size_t * internalDataInBytes,
829
+ size_t * workspaceInBytes);
830
+
831
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfoBatched(
832
+ cusolverSpHandle_t handle,
833
+ int m,
834
+ int n,
835
+ int nnz,
836
+ const cusparseMatDescr_t descrA,
837
+ const cuComplex * csrVal,
838
+ const int * csrRowPtr,
839
+ const int * csrColInd,
840
+ int batchSize,
841
+ csrqrInfo_t info,
842
+ size_t * internalDataInBytes,
843
+ size_t * workspaceInBytes);
844
+
845
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfoBatched(
846
+ cusolverSpHandle_t handle,
847
+ int m,
848
+ int n,
849
+ int nnz,
850
+ const cusparseMatDescr_t descrA,
851
+ const cuDoubleComplex * csrVal,
852
+ const int * csrRowPtr,
853
+ const int * csrColInd,
854
+ int batchSize,
855
+ csrqrInfo_t info,
856
+ size_t * internalDataInBytes,
857
+ size_t * workspaceInBytes);
858
+
859
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrsvBatched(
860
+ cusolverSpHandle_t handle,
861
+ int m,
862
+ int n,
863
+ int nnz,
864
+ const cusparseMatDescr_t descrA,
865
+ const float * csrValA,
866
+ const int * csrRowPtrA,
867
+ const int * csrColIndA,
868
+ const float * b,
869
+ float * x,
870
+ int batchSize,
871
+ csrqrInfo_t info,
872
+ void * pBuffer);
873
+
874
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrsvBatched(
875
+ cusolverSpHandle_t handle,
876
+ int m,
877
+ int n,
878
+ int nnz,
879
+ const cusparseMatDescr_t descrA,
880
+ const double * csrValA,
881
+ const int * csrRowPtrA,
882
+ const int * csrColIndA,
883
+ const double * b,
884
+ double * x,
885
+ int batchSize,
886
+ csrqrInfo_t info,
887
+ void * pBuffer);
888
+
889
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrsvBatched(
890
+ cusolverSpHandle_t handle,
891
+ int m,
892
+ int n,
893
+ int nnz,
894
+ const cusparseMatDescr_t descrA,
895
+ const cuComplex * csrValA,
896
+ const int * csrRowPtrA,
897
+ const int * csrColIndA,
898
+ const cuComplex * b,
899
+ cuComplex * x,
900
+ int batchSize,
901
+ csrqrInfo_t info,
902
+ void * pBuffer);
903
+
904
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrsvBatched(
905
+ cusolverSpHandle_t handle,
906
+ int m,
907
+ int n,
908
+ int nnz,
909
+ const cusparseMatDescr_t descrA,
910
+ const cuDoubleComplex * csrValA,
911
+ const int * csrRowPtrA,
912
+ const int * csrColIndA,
913
+ const cuDoubleComplex * b,
914
+ cuDoubleComplex * x,
915
+ int batchSize,
916
+ csrqrInfo_t info,
917
+ void * pBuffer);
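The batched QR block is used as a three-stage workflow: analyze the shared pattern once, size the workspace, allocate it, then factorize and solve all batchSize systems in one call. A device-memory, double-precision sketch; d_csrValABatch, d_b and d_x are assumed to pack the batchSize value arrays, right-hand sides and solutions contiguously:

    csrqrInfo_t info = NULL;
    cusolverSpCreateCsrqrInfo(&info);

    cusolverSpXcsrqrAnalysisBatched(sp, m, n, nnzA, descrA,
                                    d_csrRowPtrA, d_csrColIndA, info);

    size_t internalBytes = 0, workspaceBytes = 0;
    cusolverSpDcsrqrBufferInfoBatched(sp, m, n, nnzA, descrA,
                                      d_csrValABatch, d_csrRowPtrA, d_csrColIndA,
                                      batchSize, info,
                                      &internalBytes, &workspaceBytes);

    void *d_work = NULL;
    cudaMalloc(&d_work, workspaceBytes);

    cusolverSpDcsrqrsvBatched(sp, m, n, nnzA, descrA,
                              d_csrValABatch, d_csrRowPtrA, d_csrColIndA,
                              d_b, d_x, batchSize, info, d_work);

    cudaFree(d_work);
    cusolverSpDestroyCsrqrInfo(info);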
918
+
919
+ #if defined(__cplusplus)
920
+ }
921
+ #endif /* __cplusplus */
922
+
923
+ #endif // define CUSOLVERSP_H_
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h ADDED
@@ -0,0 +1,1107 @@
1
+ /*
2
+ * Copyright 2015 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVERSP_LOWLEVEL_PREVIEW_H_)
51
+ #define CUSOLVERSP_LOWLEVEL_PREVIEW_H_
52
+
53
+ #include "cusolverSp.h"
54
+
55
+ #if defined(__cplusplus)
56
+ extern "C" {
57
+ #endif /* __cplusplus */
58
+
59
+ struct csrluInfoHost;
60
+ typedef struct csrluInfoHost *csrluInfoHost_t;
61
+
62
+ struct csrqrInfoHost;
63
+ typedef struct csrqrInfoHost *csrqrInfoHost_t;
64
+
65
+ struct csrcholInfoHost;
66
+ typedef struct csrcholInfoHost *csrcholInfoHost_t;
67
+
68
+ struct csrcholInfo;
69
+ typedef struct csrcholInfo *csrcholInfo_t;
70
+
71
+ /*
72
+ * Low level API for CPU LU
73
+ *
74
+ */
75
+ cusolverStatus_t CUSOLVERAPI
76
+ cusolverSpCreateCsrluInfoHost(csrluInfoHost_t *info);
77
+
78
+ cusolverStatus_t CUSOLVERAPI
79
+ cusolverSpDestroyCsrluInfoHost(csrluInfoHost_t info);
80
+
81
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrluAnalysisHost(
82
+ cusolverSpHandle_t handle,
83
+ int n,
84
+ int nnzA,
85
+ const cusparseMatDescr_t descrA,
86
+ const int * csrRowPtrA,
87
+ const int * csrColIndA,
88
+ csrluInfoHost_t info);
89
+
90
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrluBufferInfoHost(
91
+ cusolverSpHandle_t handle,
92
+ int n,
93
+ int nnzA,
94
+ const cusparseMatDescr_t descrA,
95
+ const float * csrValA,
96
+ const int * csrRowPtrA,
97
+ const int * csrColIndA,
98
+ csrluInfoHost_t info,
99
+ size_t * internalDataInBytes,
100
+ size_t * workspaceInBytes);
101
+
102
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluBufferInfoHost(
103
+ cusolverSpHandle_t handle,
104
+ int n,
105
+ int nnzA,
106
+ const cusparseMatDescr_t descrA,
107
+ const double * csrValA,
108
+ const int * csrRowPtrA,
109
+ const int * csrColIndA,
110
+ csrluInfoHost_t info,
111
+ size_t * internalDataInBytes,
112
+ size_t * workspaceInBytes);
113
+
114
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluBufferInfoHost(
115
+ cusolverSpHandle_t handle,
116
+ int n,
117
+ int nnzA,
118
+ const cusparseMatDescr_t descrA,
119
+ const cuComplex * csrValA,
120
+ const int * csrRowPtrA,
121
+ const int * csrColIndA,
122
+ csrluInfoHost_t info,
123
+ size_t * internalDataInBytes,
124
+ size_t * workspaceInBytes);
125
+
126
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluBufferInfoHost(
127
+ cusolverSpHandle_t handle,
128
+ int n,
129
+ int nnzA,
130
+ const cusparseMatDescr_t descrA,
131
+ const cuDoubleComplex * csrValA,
132
+ const int * csrRowPtrA,
133
+ const int * csrColIndA,
134
+ csrluInfoHost_t info,
135
+ size_t * internalDataInBytes,
136
+ size_t * workspaceInBytes);
137
+
138
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrluFactorHost(
139
+ cusolverSpHandle_t handle,
140
+ int n,
141
+ int nnzA,
142
+ const cusparseMatDescr_t descrA,
143
+ const float * csrValA,
144
+ const int * csrRowPtrA,
145
+ const int * csrColIndA,
146
+ csrluInfoHost_t info,
147
+ float pivot_threshold,
148
+ void * pBuffer);
149
+
150
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluFactorHost(
151
+ cusolverSpHandle_t handle,
152
+ int n,
153
+ int nnzA,
154
+ const cusparseMatDescr_t descrA,
155
+ const double * csrValA,
156
+ const int * csrRowPtrA,
157
+ const int * csrColIndA,
158
+ csrluInfoHost_t info,
159
+ double pivot_threshold,
160
+ void * pBuffer);
161
+
162
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluFactorHost(
163
+ cusolverSpHandle_t handle,
164
+ int n,
165
+ int nnzA,
166
+ const cusparseMatDescr_t descrA,
167
+ const cuComplex * csrValA,
168
+ const int * csrRowPtrA,
169
+ const int * csrColIndA,
170
+ csrluInfoHost_t info,
171
+ float pivot_threshold,
172
+ void * pBuffer);
173
+
174
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluFactorHost(
175
+ cusolverSpHandle_t handle,
176
+ int n,
177
+ int nnzA,
178
+ const cusparseMatDescr_t descrA,
179
+ const cuDoubleComplex * csrValA,
180
+ const int * csrRowPtrA,
181
+ const int * csrColIndA,
182
+ csrluInfoHost_t info,
183
+ double pivot_threshold,
184
+ void * pBuffer);
185
+
186
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrluZeroPivotHost(
187
+ cusolverSpHandle_t handle,
188
+ csrluInfoHost_t info,
189
+ float tol,
190
+ int * position);
191
+
192
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluZeroPivotHost(
193
+ cusolverSpHandle_t handle,
194
+ csrluInfoHost_t info,
195
+ double tol,
196
+ int * position);
197
+
198
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluZeroPivotHost(
199
+ cusolverSpHandle_t handle,
200
+ csrluInfoHost_t info,
201
+ float tol,
202
+ int * position);
203
+
204
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluZeroPivotHost(
205
+ cusolverSpHandle_t handle,
206
+ csrluInfoHost_t info,
207
+ double tol,
208
+ int * position);
209
+
210
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrluSolveHost(
211
+ cusolverSpHandle_t handle,
212
+ int n,
213
+ const float * b,
214
+ float * x,
215
+ csrluInfoHost_t info,
216
+ void * pBuffer);
217
+
218
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluSolveHost(
219
+ cusolverSpHandle_t handle,
220
+ int n,
221
+ const double * b,
222
+ double * x,
223
+ csrluInfoHost_t info,
224
+ void * pBuffer);
225
+
226
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluSolveHost(
227
+ cusolverSpHandle_t handle,
228
+ int n,
229
+ const cuComplex * b,
230
+ cuComplex * x,
231
+ csrluInfoHost_t info,
232
+ void * pBuffer);
233
+
234
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluSolveHost(
235
+ cusolverSpHandle_t handle,
236
+ int n,
237
+ const cuDoubleComplex *b,
238
+ cuDoubleComplex * x,
239
+ csrluInfoHost_t info,
240
+ void * pBuffer);
241
+
242
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrluNnzHost(
243
+ cusolverSpHandle_t handle,
244
+ int * nnzLRef,
245
+ int * nnzURef,
246
+ csrluInfoHost_t info);
247
+
248
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrluExtractHost(
249
+ cusolverSpHandle_t handle,
250
+ int * P,
251
+ int * Q,
252
+ const cusparseMatDescr_t descrL,
253
+ float * csrValL,
254
+ int * csrRowPtrL,
255
+ int * csrColIndL,
256
+ const cusparseMatDescr_t descrU,
257
+ float * csrValU,
258
+ int * csrRowPtrU,
259
+ int * csrColIndU,
260
+ csrluInfoHost_t info,
261
+ void * pBuffer);
262
+
263
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluExtractHost(
264
+ cusolverSpHandle_t handle,
265
+ int * P,
266
+ int * Q,
267
+ const cusparseMatDescr_t descrL,
268
+ double * csrValL,
269
+ int * csrRowPtrL,
270
+ int * csrColIndL,
271
+ const cusparseMatDescr_t descrU,
272
+ double * csrValU,
273
+ int * csrRowPtrU,
274
+ int * csrColIndU,
275
+ csrluInfoHost_t info,
276
+ void * pBuffer);
277
+
278
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluExtractHost(
279
+ cusolverSpHandle_t handle,
280
+ int * P,
281
+ int * Q,
282
+ const cusparseMatDescr_t descrL,
283
+ cuComplex * csrValL,
284
+ int * csrRowPtrL,
285
+ int * csrColIndL,
286
+ const cusparseMatDescr_t descrU,
287
+ cuComplex * csrValU,
288
+ int * csrRowPtrU,
289
+ int * csrColIndU,
290
+ csrluInfoHost_t info,
291
+ void * pBuffer);
292
+
293
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluExtractHost(
294
+ cusolverSpHandle_t handle,
295
+ int * P,
296
+ int * Q,
297
+ const cusparseMatDescr_t descrL,
298
+ cuDoubleComplex * csrValL,
299
+ int * csrRowPtrL,
300
+ int * csrColIndL,
301
+ const cusparseMatDescr_t descrU,
302
+ cuDoubleComplex * csrValU,
303
+ int * csrRowPtrU,
304
+ int * csrColIndU,
305
+ csrluInfoHost_t info,
306
+ void * pBuffer);
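These csrlu*Host calls expose the stages behind a one-shot CPU LU solve such as csrlsvluHost: analyze the pattern, size and allocate a host workspace, factorize with threshold pivoting, check for a zero pivot, then solve (and, if wanted, extract P, Q, L and U). A condensed double-precision sketch with placeholder names and no error handling:

    csrluInfoHost_t lu = NULL;
    cusolverSpCreateCsrluInfoHost(&lu);
    cusolverSpXcsrluAnalysisHost(sp, n, nnzA, descrA,
                                 h_csrRowPtrA, h_csrColIndA, lu);

    size_t internalBytes = 0, workspaceBytes = 0;
    cusolverSpDcsrluBufferInfoHost(sp, n, nnzA, descrA,
                                   h_csrValA, h_csrRowPtrA, h_csrColIndA,
                                   lu, &internalBytes, &workspaceBytes);
    void *buffer = malloc(workspaceBytes);

    cusolverSpDcsrluFactorHost(sp, n, nnzA, descrA,
                               h_csrValA, h_csrRowPtrA, h_csrColIndA,
                               lu, 1.0 /* pivot_threshold */, buffer);

    int position = -1;
    cusolverSpDcsrluZeroPivotHost(sp, lu, 1e-14 /* tol */, &position);
    /* position stays -1 if no pivot magnitude fell below tol */

    cusolverSpDcsrluSolveHost(sp, n, h_b, h_x, lu, buffer);

    free(buffer);
    cusolverSpDestroyCsrluInfoHost(lu);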
307
+
308
+ /*
309
+ * Low level API for CPU QR
310
+ *
311
+ */
312
+ cusolverStatus_t CUSOLVERAPI
313
+ cusolverSpCreateCsrqrInfoHost(csrqrInfoHost_t *info);
314
+
315
+ cusolverStatus_t CUSOLVERAPI
316
+ cusolverSpDestroyCsrqrInfoHost(csrqrInfoHost_t info);
317
+
318
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysisHost(
319
+ cusolverSpHandle_t handle,
320
+ int m,
321
+ int n,
322
+ int nnzA,
323
+ const cusparseMatDescr_t descrA,
324
+ const int * csrRowPtrA,
325
+ const int * csrColIndA,
326
+ csrqrInfoHost_t info);
327
+
328
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfoHost(
329
+ cusolverSpHandle_t handle,
330
+ int m,
331
+ int n,
332
+ int nnzA,
333
+ const cusparseMatDescr_t descrA,
334
+ const float * csrValA,
335
+ const int * csrRowPtrA,
336
+ const int * csrColIndA,
337
+ csrqrInfoHost_t info,
338
+ size_t * internalDataInBytes,
339
+ size_t * workspaceInBytes);
340
+
341
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfoHost(
342
+ cusolverSpHandle_t handle,
343
+ int m,
344
+ int n,
345
+ int nnzA,
346
+ const cusparseMatDescr_t descrA,
347
+ const double * csrValA,
348
+ const int * csrRowPtrA,
349
+ const int * csrColIndA,
350
+ csrqrInfoHost_t info,
351
+ size_t * internalDataInBytes,
352
+ size_t * workspaceInBytes);
353
+
354
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfoHost(
355
+ cusolverSpHandle_t handle,
356
+ int m,
357
+ int n,
358
+ int nnzA,
359
+ const cusparseMatDescr_t descrA,
360
+ const cuComplex * csrValA,
361
+ const int * csrRowPtrA,
362
+ const int * csrColIndA,
363
+ csrqrInfoHost_t info,
364
+ size_t * internalDataInBytes,
365
+ size_t * workspaceInBytes);
366
+
367
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfoHost(
368
+ cusolverSpHandle_t handle,
369
+ int m,
370
+ int n,
371
+ int nnzA,
372
+ const cusparseMatDescr_t descrA,
373
+ const cuDoubleComplex * csrValA,
374
+ const int * csrRowPtrA,
375
+ const int * csrColIndA,
376
+ csrqrInfoHost_t info,
377
+ size_t * internalDataInBytes,
378
+ size_t * workspaceInBytes);
379
+
380
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSetupHost(
381
+ cusolverSpHandle_t handle,
382
+ int m,
383
+ int n,
384
+ int nnzA,
385
+ const cusparseMatDescr_t descrA,
386
+ const float * csrValA,
387
+ const int * csrRowPtrA,
388
+ const int * csrColIndA,
389
+ float mu,
390
+ csrqrInfoHost_t info);
391
+
392
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSetupHost(
393
+ cusolverSpHandle_t handle,
394
+ int m,
395
+ int n,
396
+ int nnzA,
397
+ const cusparseMatDescr_t descrA,
398
+ const double * csrValA,
399
+ const int * csrRowPtrA,
400
+ const int * csrColIndA,
401
+ double mu,
402
+ csrqrInfoHost_t info);
403
+
404
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSetupHost(
405
+ cusolverSpHandle_t handle,
406
+ int m,
407
+ int n,
408
+ int nnzA,
409
+ const cusparseMatDescr_t descrA,
410
+ const cuComplex * csrValA,
411
+ const int * csrRowPtrA,
412
+ const int * csrColIndA,
413
+ cuComplex mu,
414
+ csrqrInfoHost_t info);
415
+
416
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSetupHost(
417
+ cusolverSpHandle_t handle,
418
+ int m,
419
+ int n,
420
+ int nnzA,
421
+ const cusparseMatDescr_t descrA,
422
+ const cuDoubleComplex * csrValA,
423
+ const int * csrRowPtrA,
424
+ const int * csrColIndA,
425
+ cuDoubleComplex mu,
426
+ csrqrInfoHost_t info);
427
+
428
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrFactorHost(
429
+ cusolverSpHandle_t handle,
430
+ int m,
431
+ int n,
432
+ int nnzA,
433
+ float * b,
434
+ float * x,
435
+ csrqrInfoHost_t info,
436
+ void * pBuffer);
437
+
438
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrFactorHost(
439
+ cusolverSpHandle_t handle,
440
+ int m,
441
+ int n,
442
+ int nnzA,
443
+ double * b,
444
+ double * x,
445
+ csrqrInfoHost_t info,
446
+ void * pBuffer);
447
+
448
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrFactorHost(
449
+ cusolverSpHandle_t handle,
450
+ int m,
451
+ int n,
452
+ int nnzA,
453
+ cuComplex * b,
454
+ cuComplex * x,
455
+ csrqrInfoHost_t info,
456
+ void * pBuffer);
457
+
458
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrFactorHost(
459
+ cusolverSpHandle_t handle,
460
+ int m,
461
+ int n,
462
+ int nnzA,
463
+ cuDoubleComplex * b,
464
+ cuDoubleComplex * x,
465
+ csrqrInfoHost_t info,
466
+ void * pBuffer);
467
+
468
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrZeroPivotHost(
469
+ cusolverSpHandle_t handle,
470
+ csrqrInfoHost_t info,
471
+ float tol,
472
+ int * position);
473
+
474
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrZeroPivotHost(
475
+ cusolverSpHandle_t handle,
476
+ csrqrInfoHost_t info,
477
+ double tol,
478
+ int * position);
479
+
480
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrZeroPivotHost(
481
+ cusolverSpHandle_t handle,
482
+ csrqrInfoHost_t info,
483
+ float tol,
484
+ int * position);
485
+
486
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrZeroPivotHost(
487
+ cusolverSpHandle_t handle,
488
+ csrqrInfoHost_t info,
489
+ double tol,
490
+ int * position);
491
+
492
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSolveHost(
493
+ cusolverSpHandle_t handle,
494
+ int m,
495
+ int n,
496
+ float * b,
497
+ float * x,
498
+ csrqrInfoHost_t info,
499
+ void * pBuffer);
500
+
501
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSolveHost(
502
+ cusolverSpHandle_t handle,
503
+ int m,
504
+ int n,
505
+ double * b,
506
+ double * x,
507
+ csrqrInfoHost_t info,
508
+ void * pBuffer);
509
+
510
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSolveHost(
511
+ cusolverSpHandle_t handle,
512
+ int m,
513
+ int n,
514
+ cuComplex * b,
515
+ cuComplex * x,
516
+ csrqrInfoHost_t info,
517
+ void * pBuffer);
518
+
519
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSolveHost(
520
+ cusolverSpHandle_t handle,
521
+ int m,
522
+ int n,
523
+ cuDoubleComplex * b,
524
+ cuDoubleComplex * x,
525
+ csrqrInfoHost_t info,
526
+ void * pBuffer);
527
+
528
+ /*
529
+ * Low level API for GPU QR
530
+ *
531
+ */
532
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysis(
533
+ cusolverSpHandle_t handle,
534
+ int m,
535
+ int n,
536
+ int nnzA,
537
+ const cusparseMatDescr_t descrA,
538
+ const int * csrRowPtrA,
539
+ const int * csrColIndA,
540
+ csrqrInfo_t info);
541
+
542
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfo(
543
+ cusolverSpHandle_t handle,
544
+ int m,
545
+ int n,
546
+ int nnzA,
547
+ const cusparseMatDescr_t descrA,
548
+ const float * csrValA,
549
+ const int * csrRowPtrA,
550
+ const int * csrColIndA,
551
+ csrqrInfo_t info,
552
+ size_t * internalDataInBytes,
553
+ size_t * workspaceInBytes);
554
+
555
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfo(
556
+ cusolverSpHandle_t handle,
557
+ int m,
558
+ int n,
559
+ int nnzA,
560
+ const cusparseMatDescr_t descrA,
561
+ const double * csrValA,
562
+ const int * csrRowPtrA,
563
+ const int * csrColIndA,
564
+ csrqrInfo_t info,
565
+ size_t * internalDataInBytes,
566
+ size_t * workspaceInBytes);
567
+
568
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfo(
569
+ cusolverSpHandle_t handle,
570
+ int m,
571
+ int n,
572
+ int nnzA,
573
+ const cusparseMatDescr_t descrA,
574
+ const cuComplex * csrValA,
575
+ const int * csrRowPtrA,
576
+ const int * csrColIndA,
577
+ csrqrInfo_t info,
578
+ size_t * internalDataInBytes,
579
+ size_t * workspaceInBytes);
580
+
581
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfo(
582
+ cusolverSpHandle_t handle,
583
+ int m,
584
+ int n,
585
+ int nnzA,
586
+ const cusparseMatDescr_t descrA,
587
+ const cuDoubleComplex * csrValA,
588
+ const int * csrRowPtrA,
589
+ const int * csrColIndA,
590
+ csrqrInfo_t info,
591
+ size_t * internalDataInBytes,
592
+ size_t * workspaceInBytes);
593
+
594
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSetup(
595
+ cusolverSpHandle_t handle,
596
+ int m,
597
+ int n,
598
+ int nnzA,
599
+ const cusparseMatDescr_t descrA,
600
+ const float * csrValA,
601
+ const int * csrRowPtrA,
602
+ const int * csrColIndA,
603
+ float mu,
604
+ csrqrInfo_t info);
605
+
606
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSetup(
607
+ cusolverSpHandle_t handle,
608
+ int m,
609
+ int n,
610
+ int nnzA,
611
+ const cusparseMatDescr_t descrA,
612
+ const double * csrValA,
613
+ const int * csrRowPtrA,
614
+ const int * csrColIndA,
615
+ double mu,
616
+ csrqrInfo_t info);
617
+
618
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSetup(
619
+ cusolverSpHandle_t handle,
620
+ int m,
621
+ int n,
622
+ int nnzA,
623
+ const cusparseMatDescr_t descrA,
624
+ const cuComplex * csrValA,
625
+ const int * csrRowPtrA,
626
+ const int * csrColIndA,
627
+ cuComplex mu,
628
+ csrqrInfo_t info);
629
+
630
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSetup(
631
+ cusolverSpHandle_t handle,
632
+ int m,
633
+ int n,
634
+ int nnzA,
635
+ const cusparseMatDescr_t descrA,
636
+ const cuDoubleComplex * csrValA,
637
+ const int * csrRowPtrA,
638
+ const int * csrColIndA,
639
+ cuDoubleComplex mu,
640
+ csrqrInfo_t info);
641
+
642
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrFactor(
643
+ cusolverSpHandle_t handle,
644
+ int m,
645
+ int n,
646
+ int nnzA,
647
+ float * b,
648
+ float * x,
649
+ csrqrInfo_t info,
650
+ void * pBuffer);
651
+
652
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrFactor(
653
+ cusolverSpHandle_t handle,
654
+ int m,
655
+ int n,
656
+ int nnzA,
657
+ double * b,
658
+ double * x,
659
+ csrqrInfo_t info,
660
+ void * pBuffer);
661
+
662
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrFactor(
663
+ cusolverSpHandle_t handle,
664
+ int m,
665
+ int n,
666
+ int nnzA,
667
+ cuComplex * b,
668
+ cuComplex * x,
669
+ csrqrInfo_t info,
670
+ void * pBuffer);
671
+
672
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrFactor(
673
+ cusolverSpHandle_t handle,
674
+ int m,
675
+ int n,
676
+ int nnzA,
677
+ cuDoubleComplex * b,
678
+ cuDoubleComplex * x,
679
+ csrqrInfo_t info,
680
+ void * pBuffer);
681
+
682
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrZeroPivot(
683
+ cusolverSpHandle_t handle,
684
+ csrqrInfo_t info,
685
+ float tol,
686
+ int * position);
687
+
688
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrZeroPivot(
689
+ cusolverSpHandle_t handle,
690
+ csrqrInfo_t info,
691
+ double tol,
692
+ int * position);
693
+
694
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrZeroPivot(
695
+ cusolverSpHandle_t handle,
696
+ csrqrInfo_t info,
697
+ float tol,
698
+ int * position);
699
+
700
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrZeroPivot(
701
+ cusolverSpHandle_t handle,
702
+ csrqrInfo_t info,
703
+ double tol,
704
+ int * position);
705
+
706
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSolve(
707
+ cusolverSpHandle_t handle,
708
+ int m,
709
+ int n,
710
+ float * b,
711
+ float * x,
712
+ csrqrInfo_t info,
713
+ void * pBuffer);
714
+
715
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSolve(
716
+ cusolverSpHandle_t handle,
717
+ int m,
718
+ int n,
719
+ double * b,
720
+ double * x,
721
+ csrqrInfo_t info,
722
+ void * pBuffer);
723
+
724
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSolve(
725
+ cusolverSpHandle_t handle,
726
+ int m,
727
+ int n,
728
+ cuComplex * b,
729
+ cuComplex * x,
730
+ csrqrInfo_t info,
731
+ void * pBuffer);
732
+
733
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSolve(
734
+ cusolverSpHandle_t handle,
735
+ int m,
736
+ int n,
737
+ cuDoubleComplex * b,
738
+ cuDoubleComplex * x,
739
+ csrqrInfo_t info,
740
+ void * pBuffer);
741
+
742
+ /*
743
+ * Low level API for CPU Cholesky
744
+ *
745
+ */
746
+ cusolverStatus_t CUSOLVERAPI
747
+ cusolverSpCreateCsrcholInfoHost(csrcholInfoHost_t *info);
748
+
749
+ cusolverStatus_t CUSOLVERAPI
750
+ cusolverSpDestroyCsrcholInfoHost(csrcholInfoHost_t info);
751
+
752
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrcholAnalysisHost(
753
+ cusolverSpHandle_t handle,
754
+ int n,
755
+ int nnzA,
756
+ const cusparseMatDescr_t descrA,
757
+ const int * csrRowPtrA,
758
+ const int * csrColIndA,
759
+ csrcholInfoHost_t info);
760
+
761
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholBufferInfoHost(
762
+ cusolverSpHandle_t handle,
763
+ int n,
764
+ int nnzA,
765
+ const cusparseMatDescr_t descrA,
766
+ const float * csrValA,
767
+ const int * csrRowPtrA,
768
+ const int * csrColIndA,
769
+ csrcholInfoHost_t info,
770
+ size_t * internalDataInBytes,
771
+ size_t * workspaceInBytes);
772
+
773
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholBufferInfoHost(
774
+ cusolverSpHandle_t handle,
775
+ int n,
776
+ int nnzA,
777
+ const cusparseMatDescr_t descrA,
778
+ const double * csrValA,
779
+ const int * csrRowPtrA,
780
+ const int * csrColIndA,
781
+ csrcholInfoHost_t info,
782
+ size_t * internalDataInBytes,
783
+ size_t * workspaceInBytes);
784
+
785
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholBufferInfoHost(
786
+ cusolverSpHandle_t handle,
787
+ int n,
788
+ int nnzA,
789
+ const cusparseMatDescr_t descrA,
790
+ const cuComplex * csrValA,
791
+ const int * csrRowPtrA,
792
+ const int * csrColIndA,
793
+ csrcholInfoHost_t info,
794
+ size_t * internalDataInBytes,
795
+ size_t * workspaceInBytes);
796
+
797
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholBufferInfoHost(
798
+ cusolverSpHandle_t handle,
799
+ int n,
800
+ int nnzA,
801
+ const cusparseMatDescr_t descrA,
802
+ const cuDoubleComplex * csrValA,
803
+ const int * csrRowPtrA,
804
+ const int * csrColIndA,
805
+ csrcholInfoHost_t info,
806
+ size_t * internalDataInBytes,
807
+ size_t * workspaceInBytes);
808
+
809
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholFactorHost(
810
+ cusolverSpHandle_t handle,
811
+ int n,
812
+ int nnzA,
813
+ const cusparseMatDescr_t descrA,
814
+ const float * csrValA,
815
+ const int * csrRowPtrA,
816
+ const int * csrColIndA,
817
+ csrcholInfoHost_t info,
818
+ void * pBuffer);
819
+
820
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholFactorHost(
821
+ cusolverSpHandle_t handle,
822
+ int n,
823
+ int nnzA,
824
+ const cusparseMatDescr_t descrA,
825
+ const double * csrValA,
826
+ const int * csrRowPtrA,
827
+ const int * csrColIndA,
828
+ csrcholInfoHost_t info,
829
+ void * pBuffer);
830
+
831
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholFactorHost(
832
+ cusolverSpHandle_t handle,
833
+ int n,
834
+ int nnzA,
835
+ const cusparseMatDescr_t descrA,
836
+ const cuComplex * csrValA,
837
+ const int * csrRowPtrA,
838
+ const int * csrColIndA,
839
+ csrcholInfoHost_t info,
840
+ void * pBuffer);
841
+
842
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholFactorHost(
843
+ cusolverSpHandle_t handle,
844
+ int n,
845
+ int nnzA,
846
+ const cusparseMatDescr_t descrA,
847
+ const cuDoubleComplex * csrValA,
848
+ const int * csrRowPtrA,
849
+ const int * csrColIndA,
850
+ csrcholInfoHost_t info,
851
+ void * pBuffer);
852
+
853
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholZeroPivotHost(
854
+ cusolverSpHandle_t handle,
855
+ csrcholInfoHost_t info,
856
+ float tol,
857
+ int * position);
858
+
859
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholZeroPivotHost(
860
+ cusolverSpHandle_t handle,
861
+ csrcholInfoHost_t info,
862
+ double tol,
863
+ int * position);
864
+
865
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholZeroPivotHost(
866
+ cusolverSpHandle_t handle,
867
+ csrcholInfoHost_t info,
868
+ float tol,
869
+ int * position);
870
+
871
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholZeroPivotHost(
872
+ cusolverSpHandle_t handle,
873
+ csrcholInfoHost_t info,
874
+ double tol,
875
+ int * position);
876
+
877
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholSolveHost(
878
+ cusolverSpHandle_t handle,
879
+ int n,
880
+ const float * b,
881
+ float * x,
882
+ csrcholInfoHost_t info,
883
+ void * pBuffer);
884
+
885
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholSolveHost(
886
+ cusolverSpHandle_t handle,
887
+ int n,
888
+ const double * b,
889
+ double * x,
890
+ csrcholInfoHost_t info,
891
+ void * pBuffer);
892
+
893
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholSolveHost(
894
+ cusolverSpHandle_t handle,
895
+ int n,
896
+ const cuComplex * b,
897
+ cuComplex * x,
898
+ csrcholInfoHost_t info,
899
+ void * pBuffer);
900
+
901
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholSolveHost(
902
+ cusolverSpHandle_t handle,
903
+ int n,
904
+ const cuDoubleComplex *b,
905
+ cuDoubleComplex * x,
906
+ csrcholInfoHost_t info,
907
+ void * pBuffer);
908
+
909
+ /*
910
+ * Low level API for GPU Cholesky
911
+ *
912
+ */
913
+ cusolverStatus_t CUSOLVERAPI cusolverSpCreateCsrcholInfo(csrcholInfo_t *info);
914
+
915
+ cusolverStatus_t CUSOLVERAPI cusolverSpDestroyCsrcholInfo(csrcholInfo_t info);
916
+
917
+ cusolverStatus_t CUSOLVERAPI cusolverSpXcsrcholAnalysis(
918
+ cusolverSpHandle_t handle,
919
+ int n,
920
+ int nnzA,
921
+ const cusparseMatDescr_t descrA,
922
+ const int * csrRowPtrA,
923
+ const int * csrColIndA,
924
+ csrcholInfo_t info);
925
+
926
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholBufferInfo(
927
+ cusolverSpHandle_t handle,
928
+ int n,
929
+ int nnzA,
930
+ const cusparseMatDescr_t descrA,
931
+ const float * csrValA,
932
+ const int * csrRowPtrA,
933
+ const int * csrColIndA,
934
+ csrcholInfo_t info,
935
+ size_t * internalDataInBytes,
936
+ size_t * workspaceInBytes);
937
+
938
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholBufferInfo(
939
+ cusolverSpHandle_t handle,
940
+ int n,
941
+ int nnzA,
942
+ const cusparseMatDescr_t descrA,
943
+ const double * csrValA,
944
+ const int * csrRowPtrA,
945
+ const int * csrColIndA,
946
+ csrcholInfo_t info,
947
+ size_t * internalDataInBytes,
948
+ size_t * workspaceInBytes);
949
+
950
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholBufferInfo(
951
+ cusolverSpHandle_t handle,
952
+ int n,
953
+ int nnzA,
954
+ const cusparseMatDescr_t descrA,
955
+ const cuComplex * csrValA,
956
+ const int * csrRowPtrA,
957
+ const int * csrColIndA,
958
+ csrcholInfo_t info,
959
+ size_t * internalDataInBytes,
960
+ size_t * workspaceInBytes);
961
+
962
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholBufferInfo(
963
+ cusolverSpHandle_t handle,
964
+ int n,
965
+ int nnzA,
966
+ const cusparseMatDescr_t descrA,
967
+ const cuDoubleComplex * csrValA,
968
+ const int * csrRowPtrA,
969
+ const int * csrColIndA,
970
+ csrcholInfo_t info,
971
+ size_t * internalDataInBytes,
972
+ size_t * workspaceInBytes);
973
+
974
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholFactor(
975
+ cusolverSpHandle_t handle,
976
+ int n,
977
+ int nnzA,
978
+ const cusparseMatDescr_t descrA,
979
+ const float * csrValA,
980
+ const int * csrRowPtrA,
981
+ const int * csrColIndA,
982
+ csrcholInfo_t info,
983
+ void * pBuffer);
984
+
985
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholFactor(
986
+ cusolverSpHandle_t handle,
987
+ int n,
988
+ int nnzA,
989
+ const cusparseMatDescr_t descrA,
990
+ const double * csrValA,
991
+ const int * csrRowPtrA,
992
+ const int * csrColIndA,
993
+ csrcholInfo_t info,
994
+ void * pBuffer);
995
+
996
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholFactor(
997
+ cusolverSpHandle_t handle,
998
+ int n,
999
+ int nnzA,
1000
+ const cusparseMatDescr_t descrA,
1001
+ const cuComplex * csrValA,
1002
+ const int * csrRowPtrA,
1003
+ const int * csrColIndA,
1004
+ csrcholInfo_t info,
1005
+ void * pBuffer);
1006
+
1007
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholFactor(
1008
+ cusolverSpHandle_t handle,
1009
+ int n,
1010
+ int nnzA,
1011
+ const cusparseMatDescr_t descrA,
1012
+ const cuDoubleComplex * csrValA,
1013
+ const int * csrRowPtrA,
1014
+ const int * csrColIndA,
1015
+ csrcholInfo_t info,
1016
+ void * pBuffer);
1017
+
1018
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholZeroPivot(
1019
+ cusolverSpHandle_t handle,
1020
+ csrcholInfo_t info,
1021
+ float tol,
1022
+ int * position);
1023
+
1024
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholZeroPivot(
1025
+ cusolverSpHandle_t handle,
1026
+ csrcholInfo_t info,
1027
+ double tol,
1028
+ int * position);
1029
+
1030
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholZeroPivot(
1031
+ cusolverSpHandle_t handle,
1032
+ csrcholInfo_t info,
1033
+ float tol,
1034
+ int * position);
1035
+
1036
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholZeroPivot(
1037
+ cusolverSpHandle_t handle,
1038
+ csrcholInfo_t info,
1039
+ double tol,
1040
+ int * position);
1041
+
1042
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholSolve(
1043
+ cusolverSpHandle_t handle,
1044
+ int n,
1045
+ const float * b,
1046
+ float * x,
1047
+ csrcholInfo_t info,
1048
+ void * pBuffer);
1049
+
1050
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholSolve(
1051
+ cusolverSpHandle_t handle,
1052
+ int n,
1053
+ const double * b,
1054
+ double * x,
1055
+ csrcholInfo_t info,
1056
+ void * pBuffer);
1057
+
1058
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholSolve(
1059
+ cusolverSpHandle_t handle,
1060
+ int n,
1061
+ const cuComplex * b,
1062
+ cuComplex * x,
1063
+ csrcholInfo_t info,
1064
+ void * pBuffer);
1065
+
1066
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholSolve(
1067
+ cusolverSpHandle_t handle,
1068
+ int n,
1069
+ const cuDoubleComplex *b,
1070
+ cuDoubleComplex * x,
1071
+ csrcholInfo_t info,
1072
+ void * pBuffer);
1073
+
1074
+ /*
1075
+ * "diag" is a device array of size N.
1076
+ * cusolverSp<t>csrcholDiag returns diag(L) to "diag" where A(P,P) = L*L**T
1077
+ * "diag" can estimate det(A) because det(A(P,P)) = det(A) = det(L)^2 if A =
1078
+ * L*L**T.
1079
+ *
1080
+ * cusolverSp<t>csrcholDiag must be called after cusolverSp<t>csrcholFactor.
1081
+ * otherwise "diag" is wrong.
1082
+ */
1083
+ cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholDiag(
1084
+ cusolverSpHandle_t handle,
1085
+ csrcholInfo_t info,
1086
+ float * diag);
1087
+
1088
+ cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholDiag(
1089
+ cusolverSpHandle_t handle,
1090
+ csrcholInfo_t info,
1091
+ double * diag);
1092
+
1093
+ cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholDiag(
1094
+ cusolverSpHandle_t handle,
1095
+ csrcholInfo_t info,
1096
+ float * diag);
1097
+
1098
+ cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholDiag(
1099
+ cusolverSpHandle_t handle,
1100
+ csrcholInfo_t info,
1101
+ double * diag);
1102
+
1103
+ #if defined(__cplusplus)
1104
+ }
1105
+ #endif /* __cplusplus */
1106
+
1107
+ #endif // CUSOLVERSP_LOWLEVEL_PREVIEW_H_
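A minimal sketch, not part of the vendored header above, of how its low-level GPU Cholesky entry points are meant to chain: symbolic analysis on the sparsity pattern, a buffer-size query, numeric factorization, a zero-pivot check, the triangular solve, and optionally csrcholDiag for the determinant estimate described in the header comment. The handle and matrix-descriptor calls (cusolverSpCreate, cusparseCreateMatDescr, cusparseSetMatType, cusparseSetMatIndexBase) come from cusolverSp.h and cusparse.h rather than this file, the helper name solve_spd_csr is invented for illustration, and error checking is omitted.

#include <math.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cusolverSp.h>
#include <cusolverSp_LOWLEVEL_PREVIEW.h>

/* Solve A*x = b for a symmetric positive definite CSR matrix A; all
 * csr/b/x arrays are assumed to already live in device memory. */
static void solve_spd_csr(int n, int nnzA,
                          const int *d_csrRowPtrA, const int *d_csrColIndA,
                          const double *d_csrValA,
                          const double *d_b, double *d_x)
{
    cusolverSpHandle_t handle = NULL;
    cusparseMatDescr_t descrA = NULL;
    csrcholInfo_t      info   = NULL;
    size_t internalBytes = 0, workspaceBytes = 0;
    void  *pBuffer = NULL;
    int    singularity = -1;

    cusolverSpCreate(&handle);            /* from cusolverSp.h */
    cusparseCreateMatDescr(&descrA);      /* from cusparse.h   */
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
    cusolverSpCreateCsrcholInfo(&info);

    /* 1. symbolic analysis on the sparsity pattern only */
    cusolverSpXcsrcholAnalysis(handle, n, nnzA, descrA,
                               d_csrRowPtrA, d_csrColIndA, info);

    /* 2. query and allocate the working buffer */
    cusolverSpDcsrcholBufferInfo(handle, n, nnzA, descrA,
                                 d_csrValA, d_csrRowPtrA, d_csrColIndA,
                                 info, &internalBytes, &workspaceBytes);
    cudaMalloc(&pBuffer, workspaceBytes);

    /* 3. numeric factorization A(P,P) = L*L**T */
    cusolverSpDcsrcholFactor(handle, n, nnzA, descrA,
                             d_csrValA, d_csrRowPtrA, d_csrColIndA,
                             info, pBuffer);

    /* 4. positive-definiteness check: singularity stays -1 when no pivot
     *    falls below the tolerance */
    cusolverSpDcsrcholZeroPivot(handle, info, 1e-14, &singularity);

    if (singularity < 0) {
        /* 5. triangular solves for x */
        cusolverSpDcsrcholSolve(handle, n, d_b, d_x, info, pBuffer);

        /* 6. optional: estimate det(A) = det(L)^2 from diag(L), as the
         *    header comment describes; accumulate logs to avoid overflow */
        double *d_diag = NULL;
        double *h_diag = malloc(n * sizeof(double));
        double  log_det = 0.0;
        cudaMalloc((void **)&d_diag, n * sizeof(double));
        cusolverSpDcsrcholDiag(handle, info, d_diag);
        cudaMemcpy(h_diag, d_diag, n * sizeof(double), cudaMemcpyDeviceToHost);
        for (int i = 0; i < n; ++i)
            log_det += 2.0 * log(h_diag[i]);
        (void)log_det;  /* log(det(A)); exponentiate if the value itself is needed */
        cudaFree(d_diag);
        free(h_diag);
    }

    cudaFree(pBuffer);
    cusolverSpDestroyCsrcholInfo(info);
    cusparseDestroyMatDescr(descrA);
    cusolverSpDestroy(handle);
}

The usual motivation for this split API, as opposed to the one-shot cusolverSpDcsrlsvchol-style routines, is that the analysis and buffer allocation can be done once and the Factor/ZeroPivot/Solve steps repeated for new numerical values that share the same sparsity pattern.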
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusolver/include/cusolver_common.h ADDED
@@ -0,0 +1,261 @@
1
+ /*
2
+ * Copyright 2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CUSOLVER_COMMON_H_)
51
+ #define CUSOLVER_COMMON_H_
52
+
53
+ #include "library_types.h"
54
+
55
+ #ifndef CUSOLVERAPI
56
+ #ifdef _WIN32
57
+ #define CUSOLVERAPI __stdcall
58
+ #else
59
+ #define CUSOLVERAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(_MSC_VER)
64
+ typedef __int64 int64_t;
65
+ #else
66
+ #include <inttypes.h>
67
+ #endif
68
+
69
+ typedef int cusolver_int_t;
70
+
71
+ #define CUSOLVER_VER_MAJOR 11
72
+ #define CUSOLVER_VER_MINOR 6
73
+ #define CUSOLVER_VER_PATCH 1
74
+ #define CUSOLVER_VER_BUILD 9
75
+ #define CUSOLVER_VERSION \
76
+ (CUSOLVER_VER_MAJOR * 1000 + CUSOLVER_VER_MINOR * 100 + CUSOLVER_VER_PATCH)
77
+
78
+ //------------------------------------------------------------------------------
79
+
80
+ #if !defined(_MSC_VER)
81
+ #define CUSOLVER_CPP_VERSION __cplusplus
82
+ #elif _MSC_FULL_VER >= 190024210 // Visual Studio 2015 Update 3
83
+ #define CUSOLVER_CPP_VERSION _MSVC_LANG
84
+ #else
85
+ #define CUSOLVER_CPP_VERSION 0
86
+ #endif
87
+
88
+ //------------------------------------------------------------------------------
89
+
90
+ #if !defined(DISABLE_CUSOLVER_DEPRECATED)
91
+
92
+ #if CUSOLVER_CPP_VERSION >= 201402L
93
+
94
+ #define CUSOLVER_DEPRECATED(new_func) \
95
+ [[deprecated("please use " #new_func " instead")]]
96
+
97
+ #elif defined(_MSC_VER)
98
+
99
+ #define CUSOLVER_DEPRECATED(new_func) \
100
+ __declspec(deprecated("please use " #new_func " instead"))
101
+
102
+ #elif defined(__INTEL_COMPILER) || defined(__clang__) || \
103
+ (defined(__GNUC__) && \
104
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
105
+
106
+ #define CUSOLVER_DEPRECATED(new_func) \
107
+ __attribute__((deprecated("please use " #new_func " instead")))
108
+
109
+ #elif defined(__GNUC__) || defined(__xlc__)
110
+
111
+ #define CUSOLVER_DEPRECATED(new_func) __attribute__((deprecated))
112
+
113
+ #else
114
+
115
+ #define CUSOLVER_DEPRECATED(new_func)
116
+
117
+ #endif // defined(__cplusplus) && __cplusplus >= 201402L
118
+ //------------------------------------------------------------------------------
119
+
120
+ #if CUSOLVER_CPP_VERSION >= 201703L
121
+
122
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum) \
123
+ [[deprecated("please use " #new_enum " instead")]]
124
+
125
+ #elif defined(__clang__) || \
126
+ (defined(__GNUC__) && __GNUC__ >= 6 && !defined(__PGI))
127
+
128
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum) \
129
+ __attribute__((deprecated("please use " #new_enum " instead")))
130
+
131
+ #else
132
+
133
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum)
134
+
135
+ #endif // defined(__cplusplus) && __cplusplus >= 201402L
136
+
137
+ #else // defined(DISABLE_CUSOLVER_DEPRECATED)
138
+
139
+ #define CUSOLVER_DEPRECATED(new_func)
140
+ #define CUSOLVER_DEPRECATED_ENUM(new_enum)
141
+
142
+ #endif // !defined(DISABLE_CUSOLVER_DEPRECATED)
143
+
144
+ #undef CUSOLVER_CPP_VERSION
145
+
146
+ #if defined(__cplusplus)
147
+ extern "C" {
148
+ #endif /* __cplusplus */
149
+
150
+ typedef enum {
151
+ CUSOLVER_STATUS_SUCCESS = 0,
152
+ CUSOLVER_STATUS_NOT_INITIALIZED = 1,
153
+ CUSOLVER_STATUS_ALLOC_FAILED = 2,
154
+ CUSOLVER_STATUS_INVALID_VALUE = 3,
155
+ CUSOLVER_STATUS_ARCH_MISMATCH = 4,
156
+ CUSOLVER_STATUS_MAPPING_ERROR = 5,
157
+ CUSOLVER_STATUS_EXECUTION_FAILED = 6,
158
+ CUSOLVER_STATUS_INTERNAL_ERROR = 7,
159
+ CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8,
160
+ CUSOLVER_STATUS_NOT_SUPPORTED = 9,
161
+ CUSOLVER_STATUS_ZERO_PIVOT = 10,
162
+ CUSOLVER_STATUS_INVALID_LICENSE = 11,
163
+ CUSOLVER_STATUS_IRS_PARAMS_NOT_INITIALIZED = 12,
164
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID = 13,
165
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_PREC = 14,
166
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_REFINE = 15,
167
+ CUSOLVER_STATUS_IRS_PARAMS_INVALID_MAXITER = 16,
168
+ CUSOLVER_STATUS_IRS_INTERNAL_ERROR = 20,
169
+ CUSOLVER_STATUS_IRS_NOT_SUPPORTED = 21,
170
+ CUSOLVER_STATUS_IRS_OUT_OF_RANGE = 22,
171
+ CUSOLVER_STATUS_IRS_NRHS_NOT_SUPPORTED_FOR_REFINE_GMRES = 23,
172
+ CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED = 25,
173
+ CUSOLVER_STATUS_IRS_INFOS_NOT_DESTROYED = 26,
174
+ CUSOLVER_STATUS_IRS_MATRIX_SINGULAR = 30,
175
+ CUSOLVER_STATUS_INVALID_WORKSPACE = 31
176
+ } cusolverStatus_t;
177
+
178
+ typedef enum {
179
+ CUSOLVER_EIG_TYPE_1 = 1,
180
+ CUSOLVER_EIG_TYPE_2 = 2,
181
+ CUSOLVER_EIG_TYPE_3 = 3
182
+ } cusolverEigType_t;
183
+
184
+ typedef enum {
185
+ CUSOLVER_EIG_MODE_NOVECTOR = 0,
186
+ CUSOLVER_EIG_MODE_VECTOR = 1
187
+ } cusolverEigMode_t;
188
+
189
+ typedef enum {
190
+ CUSOLVER_EIG_RANGE_ALL = 1001,
191
+ CUSOLVER_EIG_RANGE_I = 1002,
192
+ CUSOLVER_EIG_RANGE_V = 1003,
193
+ } cusolverEigRange_t;
194
+
195
+ typedef enum {
196
+ CUSOLVER_INF_NORM = 104,
197
+ CUSOLVER_MAX_NORM = 105,
198
+ CUSOLVER_ONE_NORM = 106,
199
+ CUSOLVER_FRO_NORM = 107,
200
+ } cusolverNorm_t;
201
+
202
+ typedef enum {
203
+ CUSOLVER_IRS_REFINE_NOT_SET = 1100,
204
+ CUSOLVER_IRS_REFINE_NONE = 1101,
205
+ CUSOLVER_IRS_REFINE_CLASSICAL = 1102,
206
+ CUSOLVER_IRS_REFINE_CLASSICAL_GMRES = 1103,
207
+ CUSOLVER_IRS_REFINE_GMRES = 1104,
208
+ CUSOLVER_IRS_REFINE_GMRES_GMRES = 1105,
209
+ CUSOLVER_IRS_REFINE_GMRES_NOPCOND = 1106,
210
+
211
+ CUSOLVER_PREC_DD = 1150,
212
+ CUSOLVER_PREC_SS = 1151,
213
+ CUSOLVER_PREC_SHT = 1152,
214
+
215
+ } cusolverIRSRefinement_t;
216
+
217
+ typedef enum {
218
+ CUSOLVER_R_8I = 1201,
219
+ CUSOLVER_R_8U = 1202,
220
+ CUSOLVER_R_64F = 1203,
221
+ CUSOLVER_R_32F = 1204,
222
+ CUSOLVER_R_16F = 1205,
223
+ CUSOLVER_R_16BF = 1206,
224
+ CUSOLVER_R_TF32 = 1207,
225
+ CUSOLVER_R_AP = 1208,
226
+ CUSOLVER_C_8I = 1211,
227
+ CUSOLVER_C_8U = 1212,
228
+ CUSOLVER_C_64F = 1213,
229
+ CUSOLVER_C_32F = 1214,
230
+ CUSOLVER_C_16F = 1215,
231
+ CUSOLVER_C_16BF = 1216,
232
+ CUSOLVER_C_TF32 = 1217,
233
+ CUSOLVER_C_AP = 1218,
234
+ } cusolverPrecType_t;
235
+
236
+ typedef enum {
237
+ CUSOLVER_ALG_0 = 0, /* default algorithm */
238
+ CUSOLVER_ALG_1 = 1,
239
+ CUSOLVER_ALG_2 = 2
240
+ } cusolverAlgMode_t;
241
+
242
+ typedef enum {
243
+ CUBLAS_STOREV_COLUMNWISE = 0,
244
+ CUBLAS_STOREV_ROWWISE = 1
245
+ } cusolverStorevMode_t;
246
+
247
+ typedef enum {
248
+ CUBLAS_DIRECT_FORWARD = 0,
249
+ CUBLAS_DIRECT_BACKWARD = 1
250
+ } cusolverDirectMode_t;
251
+
252
+ cusolverStatus_t CUSOLVERAPI
253
+ cusolverGetProperty(libraryPropertyType type, int *value);
254
+
255
+ cusolverStatus_t CUSOLVERAPI cusolverGetVersion(int *version);
256
+
257
+ #if defined(__cplusplus)
258
+ }
259
+ #endif /* __cplusplus */
260
+
261
+ #endif // CUSOLVER_COMMON_H_
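A small sketch, not part of the diff, of how the version macros and the two query functions declared above are typically used together. The header packs CUSOLVER_VERSION as major*1000 + minor*100 + patch, so this copy corresponds to 11601; MAJOR_VERSION, MINOR_VERSION and PATCH_LEVEL are the libraryPropertyType enumerators from library_types.h, and linking against libcusolver (for example with -lcusolver) is assumed.

#include <stdio.h>
#include <library_types.h>
#include <cusolver_common.h>

int main(void)
{
    int runtime = 0, major = 0, minor = 0, patch = 0;

    /* version of the loaded libcusolver, packed like CUSOLVER_VERSION */
    cusolverGetVersion(&runtime);

    /* individual fields via the generic property query */
    cusolverGetProperty(MAJOR_VERSION, &major);
    cusolverGetProperty(MINOR_VERSION, &minor);
    cusolverGetProperty(PATCH_LEVEL, &patch);

    /* header value here: 11*1000 + 6*100 + 1 = 11601 */
    printf("compiled against cuSOLVER %d, running %d (%d.%d.%d)\n",
           CUSOLVER_VERSION, runtime, major, minor, patch);
    return 0;
}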
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes).
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes).
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse_v2.h ADDED
@@ -0,0 +1,54 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+ #if !defined(CUSPARSE_V2_H_)
50
+ #define CUSPARSE_V2_H_
51
+
52
+ #include "cusparse.h"
53
+
54
+ #endif
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/lib/__init__.py ADDED
File without changes
evalkit_tf449/lib/python3.10/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes).
evalkit_tf449/lib/python3.10/site-packages/pandas/__init__.py ADDED
@@ -0,0 +1,367 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import warnings
5
+
6
+ __docformat__ = "restructuredtext"
7
+
8
+ # Let users know if they're missing any of our hard dependencies
9
+ _hard_dependencies = ("numpy", "pytz", "dateutil")
10
+ _missing_dependencies = []
11
+
12
+ for _dependency in _hard_dependencies:
13
+ try:
14
+ __import__(_dependency)
15
+ except ImportError as _e: # pragma: no cover
16
+ _missing_dependencies.append(f"{_dependency}: {_e}")
17
+
18
+ if _missing_dependencies: # pragma: no cover
19
+ raise ImportError(
20
+ "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies)
21
+ )
22
+ del _hard_dependencies, _dependency, _missing_dependencies
23
+
24
+ try:
25
+ # numpy compat
26
+ from pandas.compat import (
27
+ is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401
28
+ )
29
+ except ImportError as _err: # pragma: no cover
30
+ _module = _err.name
31
+ raise ImportError(
32
+ f"C extension: {_module} not built. If you want to import "
33
+ "pandas from the source directory, you may need to run "
34
+ "'python setup.py build_ext' to build the C extensions first."
35
+ ) from _err
36
+
37
+ from pandas._config import (
38
+ get_option,
39
+ set_option,
40
+ reset_option,
41
+ describe_option,
42
+ option_context,
43
+ options,
44
+ )
45
+
46
+ # let init-time option registration happen
47
+ import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401
48
+
49
+ from pandas.core.api import (
50
+ # dtype
51
+ ArrowDtype,
52
+ Int8Dtype,
53
+ Int16Dtype,
54
+ Int32Dtype,
55
+ Int64Dtype,
56
+ UInt8Dtype,
57
+ UInt16Dtype,
58
+ UInt32Dtype,
59
+ UInt64Dtype,
60
+ Float32Dtype,
61
+ Float64Dtype,
62
+ CategoricalDtype,
63
+ PeriodDtype,
64
+ IntervalDtype,
65
+ DatetimeTZDtype,
66
+ StringDtype,
67
+ BooleanDtype,
68
+ # missing
69
+ NA,
70
+ isna,
71
+ isnull,
72
+ notna,
73
+ notnull,
74
+ # indexes
75
+ Index,
76
+ CategoricalIndex,
77
+ RangeIndex,
78
+ MultiIndex,
79
+ IntervalIndex,
80
+ TimedeltaIndex,
81
+ DatetimeIndex,
82
+ PeriodIndex,
83
+ IndexSlice,
84
+ # tseries
85
+ NaT,
86
+ Period,
87
+ period_range,
88
+ Timedelta,
89
+ timedelta_range,
90
+ Timestamp,
91
+ date_range,
92
+ bdate_range,
93
+ Interval,
94
+ interval_range,
95
+ DateOffset,
96
+ # conversion
97
+ to_numeric,
98
+ to_datetime,
99
+ to_timedelta,
100
+ # misc
101
+ Flags,
102
+ Grouper,
103
+ factorize,
104
+ unique,
105
+ value_counts,
106
+ NamedAgg,
107
+ array,
108
+ Categorical,
109
+ set_eng_float_format,
110
+ Series,
111
+ DataFrame,
112
+ )
113
+
114
+ from pandas.core.dtypes.dtypes import SparseDtype
115
+
116
+ from pandas.tseries.api import infer_freq
117
+ from pandas.tseries import offsets
118
+
119
+ from pandas.core.computation.api import eval
120
+
121
+ from pandas.core.reshape.api import (
122
+ concat,
123
+ lreshape,
124
+ melt,
125
+ wide_to_long,
126
+ merge,
127
+ merge_asof,
128
+ merge_ordered,
129
+ crosstab,
130
+ pivot,
131
+ pivot_table,
132
+ get_dummies,
133
+ from_dummies,
134
+ cut,
135
+ qcut,
136
+ )
137
+
138
+ from pandas import api, arrays, errors, io, plotting, tseries
139
+ from pandas import testing
140
+ from pandas.util._print_versions import show_versions
141
+
142
+ from pandas.io.api import (
143
+ # excel
144
+ ExcelFile,
145
+ ExcelWriter,
146
+ read_excel,
147
+ # parsers
148
+ read_csv,
149
+ read_fwf,
150
+ read_table,
151
+ # pickle
152
+ read_pickle,
153
+ to_pickle,
154
+ # pytables
155
+ HDFStore,
156
+ read_hdf,
157
+ # sql
158
+ read_sql,
159
+ read_sql_query,
160
+ read_sql_table,
161
+ # misc
162
+ read_clipboard,
163
+ read_parquet,
164
+ read_orc,
165
+ read_feather,
166
+ read_gbq,
167
+ read_html,
168
+ read_xml,
169
+ read_json,
170
+ read_stata,
171
+ read_sas,
172
+ read_spss,
173
+ )
174
+
175
+ from pandas.io.json._normalize import json_normalize
176
+
177
+ from pandas.util._tester import test
178
+
179
+ # use the closest tagged version if possible
180
+ _built_with_meson = False
181
+ try:
182
+ from pandas._version_meson import ( # pyright: ignore [reportMissingImports]
183
+ __version__,
184
+ __git_version__,
185
+ )
186
+
187
+ _built_with_meson = True
188
+ except ImportError:
189
+ from pandas._version import get_versions
190
+
191
+ v = get_versions()
192
+ __version__ = v.get("closest-tag", v["version"])
193
+ __git_version__ = v.get("full-revisionid")
194
+ del get_versions, v
195
+
196
+ # GH#55043 - deprecation of the data_manager option
197
+ if "PANDAS_DATA_MANAGER" in os.environ:
198
+ warnings.warn(
199
+ "The env variable PANDAS_DATA_MANAGER is set. The data_manager option is "
200
+ "deprecated and will be removed in a future version. Only the BlockManager "
201
+ "will be available. Unset this environment variable to silence this warning.",
202
+ FutureWarning,
203
+ stacklevel=2,
204
+ )
205
+
206
+ del warnings, os
207
+
208
+ # module level doc-string
209
+ __doc__ = """
210
+ pandas - a powerful data analysis and manipulation library for Python
211
+ =====================================================================
212
+
213
+ **pandas** is a Python package providing fast, flexible, and expressive data
214
+ structures designed to make working with "relational" or "labeled" data both
215
+ easy and intuitive. It aims to be the fundamental high-level building block for
216
+ doing practical, **real world** data analysis in Python. Additionally, it has
217
+ the broader goal of becoming **the most powerful and flexible open source data
218
+ analysis / manipulation tool available in any language**. It is already well on
219
+ its way toward this goal.
220
+
221
+ Main Features
222
+ -------------
223
+ Here are just a few of the things that pandas does well:
224
+
225
+ - Easy handling of missing data in floating point as well as non-floating
226
+ point data.
227
+ - Size mutability: columns can be inserted and deleted from DataFrame and
228
+ higher dimensional objects
229
+ - Automatic and explicit data alignment: objects can be explicitly aligned
230
+ to a set of labels, or the user can simply ignore the labels and let
231
+ `Series`, `DataFrame`, etc. automatically align the data for you in
232
+ computations.
233
+ - Powerful, flexible group by functionality to perform split-apply-combine
234
+ operations on data sets, for both aggregating and transforming data.
235
+ - Make it easy to convert ragged, differently-indexed data in other Python
236
+ and NumPy data structures into DataFrame objects.
237
+ - Intelligent label-based slicing, fancy indexing, and subsetting of large
238
+ data sets.
239
+ - Intuitive merging and joining data sets.
240
+ - Flexible reshaping and pivoting of data sets.
241
+ - Hierarchical labeling of axes (possible to have multiple labels per tick).
242
+ - Robust IO tools for loading data from flat files (CSV and delimited),
243
+ Excel files, databases, and saving/loading data from the ultrafast HDF5
244
+ format.
245
+ - Time series-specific functionality: date range generation and frequency
246
+ conversion, moving window statistics, date shifting and lagging.
247
+ """
248
+
249
+ # Use __all__ to let type checkers know what is part of the public API.
250
+ # Pandas is not (yet) a py.typed library: the public API is determined
251
+ # based on the documentation.
252
+ __all__ = [
253
+ "ArrowDtype",
254
+ "BooleanDtype",
255
+ "Categorical",
256
+ "CategoricalDtype",
257
+ "CategoricalIndex",
258
+ "DataFrame",
259
+ "DateOffset",
260
+ "DatetimeIndex",
261
+ "DatetimeTZDtype",
262
+ "ExcelFile",
263
+ "ExcelWriter",
264
+ "Flags",
265
+ "Float32Dtype",
266
+ "Float64Dtype",
267
+ "Grouper",
268
+ "HDFStore",
269
+ "Index",
270
+ "IndexSlice",
271
+ "Int16Dtype",
272
+ "Int32Dtype",
273
+ "Int64Dtype",
274
+ "Int8Dtype",
275
+ "Interval",
276
+ "IntervalDtype",
277
+ "IntervalIndex",
278
+ "MultiIndex",
279
+ "NA",
280
+ "NaT",
281
+ "NamedAgg",
282
+ "Period",
283
+ "PeriodDtype",
284
+ "PeriodIndex",
285
+ "RangeIndex",
286
+ "Series",
287
+ "SparseDtype",
288
+ "StringDtype",
289
+ "Timedelta",
290
+ "TimedeltaIndex",
291
+ "Timestamp",
292
+ "UInt16Dtype",
293
+ "UInt32Dtype",
294
+ "UInt64Dtype",
295
+ "UInt8Dtype",
296
+ "api",
297
+ "array",
298
+ "arrays",
299
+ "bdate_range",
300
+ "concat",
301
+ "crosstab",
302
+ "cut",
303
+ "date_range",
304
+ "describe_option",
305
+ "errors",
306
+ "eval",
307
+ "factorize",
308
+ "get_dummies",
309
+ "from_dummies",
310
+ "get_option",
311
+ "infer_freq",
312
+ "interval_range",
313
+ "io",
314
+ "isna",
315
+ "isnull",
316
+ "json_normalize",
317
+ "lreshape",
318
+ "melt",
319
+ "merge",
320
+ "merge_asof",
321
+ "merge_ordered",
322
+ "notna",
323
+ "notnull",
324
+ "offsets",
325
+ "option_context",
326
+ "options",
327
+ "period_range",
328
+ "pivot",
329
+ "pivot_table",
330
+ "plotting",
331
+ "qcut",
332
+ "read_clipboard",
333
+ "read_csv",
334
+ "read_excel",
335
+ "read_feather",
336
+ "read_fwf",
337
+ "read_gbq",
338
+ "read_hdf",
339
+ "read_html",
340
+ "read_json",
341
+ "read_orc",
342
+ "read_parquet",
343
+ "read_pickle",
344
+ "read_sas",
345
+ "read_spss",
346
+ "read_sql",
347
+ "read_sql_query",
348
+ "read_sql_table",
349
+ "read_stata",
350
+ "read_table",
351
+ "read_xml",
352
+ "reset_option",
353
+ "set_eng_float_format",
354
+ "set_option",
355
+ "show_versions",
356
+ "test",
357
+ "testing",
358
+ "timedelta_range",
359
+ "to_datetime",
360
+ "to_numeric",
361
+ "to_pickle",
362
+ "to_timedelta",
363
+ "tseries",
364
+ "unique",
365
+ "value_counts",
366
+ "wide_to_long",
367
+ ]
evalkit_tf449/lib/python3.10/site-packages/pandas/_libs/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ __all__ = [
2
+ "NaT",
3
+ "NaTType",
4
+ "OutOfBoundsDatetime",
5
+ "Period",
6
+ "Timedelta",
7
+ "Timestamp",
8
+ "iNaT",
9
+ "Interval",
10
+ ]
11
+
12
+
13
+ # Below imports needs to happen first to ensure pandas top level
14
+ # module gets monkeypatched with the pandas_datetime_CAPI
15
+ # see pandas_datetime_exec in pd_datetime.c
16
+ import pandas._libs.pandas_parser # isort: skip # type: ignore[reportUnusedImport]
17
+ import pandas._libs.pandas_datetime # noqa: F401 # isort: skip # type: ignore[reportUnusedImport]
18
+ from pandas._libs.interval import Interval
19
+ from pandas._libs.tslibs import (
20
+ NaT,
21
+ NaTType,
22
+ OutOfBoundsDatetime,
23
+ Period,
24
+ Timedelta,
25
+ Timestamp,
26
+ iNaT,
27
+ )