ZTWHHH committed on
Commit a54b246 · verified · 1 Parent(s): 779a018

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi +0 -0
  3. infer_4_30_0/lib/python3.10/site-packages/torch/_C/__init__.pyi +0 -0
  4. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_aoti.pyi +20 -0
  5. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_autograd.pyi +135 -0
  6. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cpu.pyi +12 -0
  7. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cudnn.pyi +17 -0
  8. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cusparselt.pyi +1 -0
  9. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi +27 -0
  10. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi +699 -0
  11. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi +188 -0
  12. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi +32 -0
  13. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_functions.pyi +11 -0
  14. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_functorch.pyi +83 -0
  15. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_instruction_counter.pyi +4 -0
  16. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_itt.pyi +5 -0
  17. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_lazy.pyi +27 -0
  18. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi +12 -0
  19. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_monitor.pyi +44 -0
  20. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_nn.pyi +89 -0
  21. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_nvtx.pyi +7 -0
  22. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_onnx.pyi +39 -0
  23. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_profiler.pyi +244 -0
  24. infer_4_30_0/lib/python3.10/site-packages/torch/_C/_verbose.pyi +3 -0
  25. infer_4_30_0/lib/python3.10/site-packages/torch/_awaits/__init__.py +53 -0
  26. infer_4_30_0/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc +0 -0
  27. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__init__.py +172 -0
  28. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/dirichlet.cpython-310.pyc +0 -0
  29. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc +0 -0
  30. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/mixture_same_family.cpython-310.pyc +0 -0
  31. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/von_mises.cpython-310.pyc +0 -0
  32. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/bernoulli.py +132 -0
  33. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/binomial.py +167 -0
  34. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/cauchy.py +93 -0
  35. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/chi2.py +35 -0
  36. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/constraint_registry.py +294 -0
  37. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py +238 -0
  38. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/dirichlet.py +126 -0
  39. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/distribution.py +340 -0
  40. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/exp_family.py +64 -0
  41. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/exponential.py +87 -0
  42. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/gamma.py +111 -0
  43. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/gumbel.py +83 -0
  44. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/half_cauchy.py +84 -0
  45. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/inverse_gamma.py +81 -0
  46. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/kl.py +972 -0
  47. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py +99 -0
  48. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/laplace.py +97 -0
  49. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py +144 -0
  50. infer_4_30_0/lib/python3.10/site-packages/torch/distributions/log_normal.py +64 -0
.gitattributes CHANGED
@@ -1859,3 +1859,4 @@ infer_4_30_0/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.c
infer_4_30_0/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
infer_4_30_0/lib/python3.10/site-packages/shapely/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
infer_4_30_0/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ janus/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 filter=lfs diff=lfs merge=lfs -text
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/torch/_C/__init__.pyi ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_aoti.pyi ADDED
@@ -0,0 +1,20 @@
+ from ctypes import c_void_p
+
+ from torch import Tensor
+
+ # Defined in torch/csrc/inductor/aoti_runner/pybind.cpp
+
+ # Tensor to AtenTensorHandle
+ def unsafe_alloc_void_ptrs_from_tensors(tensors: list[Tensor]) -> list[c_void_p]: ...
+ def unsafe_alloc_void_ptr_from_tensor(tensor: Tensor) -> c_void_p: ...
+
+ # AtenTensorHandle to Tensor
+ def alloc_tensors_by_stealing_from_void_ptrs(
+     handles: list[c_void_p],
+ ) -> list[Tensor]: ...
+ def alloc_tensor_by_stealing_from_void_ptr(
+     handle: c_void_p,
+ ) -> Tensor: ...
+
+ class AOTIModelContainerRunnerCpu: ...
+ class AOTIModelContainerRunnerCuda: ...
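
Editorial note, not part of the commit: a minimal sketch of how the AOTI bindings above might round-trip tensors through raw AtenTensorHandle pointers, assuming a torch build that ships torch._C._aoti.

import torch
from torch._C import _aoti

t = torch.randn(2, 3)
# Tensor -> AtenTensorHandle (exposed as c_void_p)
handles = _aoti.unsafe_alloc_void_ptrs_from_tensors([t])
# AtenTensorHandle -> Tensor; ownership of the handles is "stolen" back
restored = _aoti.alloc_tensors_by_stealing_from_void_ptrs(handles)
assert torch.equal(restored[0], t)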
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_autograd.pyi ADDED
@@ -0,0 +1,135 @@
+ # mypy: allow-untyped-defs
+ from enum import Enum
+ from typing import Any, Callable
+
+ import torch
+ from torch._C._profiler import (
+     _ProfilerEvent,
+     ActiveProfilerType,
+     ProfilerActivity,
+     ProfilerConfig,
+ )
+
+ # Defined in torch/csrc/autograd/init.cpp
+
+ class DeviceType(Enum):
+     CPU = ...
+     CUDA = ...
+     XPU = ...
+     MKLDNN = ...
+     OPENGL = ...
+     OPENCL = ...
+     IDEEP = ...
+     HIP = ...
+     FPGA = ...
+     MAIA = ...
+     XLA = ...
+     MTIA = ...
+     MPS = ...
+     HPU = ...
+     Meta = ...
+     Vulkan = ...
+     Metal = ...
+     PrivateUse1 = ...
+
+ class ProfilerEvent:
+     def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def cpu_memory_usage(self) -> int: ...
+     def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def cuda_memory_usage(self) -> int: ...
+     def device(self) -> int: ...
+     def handle(self) -> int: ...
+     def has_cuda(self) -> bool: ...
+     def is_remote(self) -> bool: ...
+     def kind(self) -> int: ...
+     def name(self) -> str: ...
+     def node_id(self) -> int: ...
+     def sequence_nr(self) -> int: ...
+     def shapes(self) -> list[list[int]]: ...
+     def thread_id(self) -> int: ...
+     def flops(self) -> float: ...
+     def is_async(self) -> bool: ...
+
+ class _KinetoEvent:
+     def name(self) -> str: ...
+     def device_index(self) -> int: ...
+     def device_resource_id(self) -> int: ...
+     def start_ns(self) -> int: ...
+     def end_ns(self) -> int: ...
+     def duration_ns(self) -> int: ...
+     def is_async(self) -> bool: ...
+     def linked_correlation_id(self) -> int: ...
+     def shapes(self) -> list[list[int]]: ...
+     def dtypes(self) -> list[str]: ...
+     def concrete_inputs(self) -> list[Any]: ...
+     def kwinputs(self) -> dict[str, Any]: ...
+     def device_type(self) -> DeviceType: ...
+     def start_thread_id(self) -> int: ...
+     def end_thread_id(self) -> int: ...
+     def correlation_id(self) -> int: ...
+     def fwd_thread_id(self) -> int: ...
+     def stack(self) -> list[str]: ...
+     def scope(self) -> int: ...
+     def sequence_nr(self) -> int: ...
+     def flops(self) -> int: ...
+     def cuda_elapsed_us(self) -> int: ...
+     def privateuse1_elapsed_us(self) -> int: ...
+     def is_user_annotation(self) -> bool: ...
+
+ class _ProfilerResult:
+     def events(self) -> list[_KinetoEvent]: ...
+     def legacy_events(self) -> list[list[ProfilerEvent]]: ...
+     def save(self, path: str) -> None: ...
+     def experimental_event_tree(self) -> list[_ProfilerEvent]: ...
+     def trace_start_ns(self) -> int: ...
+
+ class SavedTensor: ...
+
+ def _enable_profiler(
+     config: ProfilerConfig,
+     activities: set[ProfilerActivity],
+ ) -> None: ...
+ def _prepare_profiler(
+     config: ProfilerConfig,
+     activities: set[ProfilerActivity],
+ ) -> None: ...
+ def _toggle_collection_dynamic(
+     enable: bool,
+     activities: set[ProfilerActivity],
+ ) -> None: ...
+ def _disable_profiler() -> _ProfilerResult: ...
+ def _profiler_enabled() -> bool: ...
+ def _add_metadata_json(key: str, value: str) -> None: ...
+ def _kineto_step() -> None: ...
+ def _get_current_graph_task_keep_graph() -> bool: ...
+ def _get_sequence_nr() -> int: ...
+ def kineto_available() -> bool: ...
+ def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ...
+ def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
+ def _supported_activities() -> set[ProfilerActivity]: ...
+ def _enable_record_function(enable: bool) -> None: ...
+ def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
+ def _push_saved_tensors_default_hooks(
+     pack_hook: Callable[[torch.Tensor], Any],
+     unpack_hook: Callable[[Any], torch.Tensor],
+ ) -> None: ...
+ def _pop_saved_tensors_default_hooks() -> None: ...
+ def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
+ def _enable_profiler_legacy(config: ProfilerConfig) -> None: ...
+ def _disable_profiler_legacy() -> list[list[ProfilerEvent]]: ...
+ def _profiler_type() -> ActiveProfilerType: ...
+ def _saved_tensors_hooks_enable() -> None: ...
+ def _saved_tensors_hooks_disable(message: str) -> None: ...
+ def _saved_tensors_hooks_get_disabled_error_message() -> str | None: ...
+ def _saved_tensors_hooks_set_tracing(is_tracing: bool) -> bool: ...
+
+ class CreationMeta(Enum):
+     DEFAULT = ...
+     IN_CUSTOM_FUNCTION = ...
+     MULTI_OUTPUT_NODE = ...
+     NO_GRAD_MODE = ...
+     INFERENCE_MODE = ...
+
+ def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ...
+ def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cpu.pyi ADDED
@@ -0,0 +1,12 @@
+ from torch.types import _bool, _int
+
+ # Defined in torch/csrc/cpu/Module.cpp
+
+ def _is_avx2_supported() -> _bool: ...
+ def _is_avx512_supported() -> _bool: ...
+ def _is_avx512_vnni_supported() -> _bool: ...
+ def _is_avx512_bf16_supported() -> _bool: ...
+ def _is_amx_tile_supported() -> _bool: ...
+ def _init_amx() -> _bool: ...
+ def _L1d_cache_size() -> _int: ...
+ def _L2_cache_size() -> _int: ...
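
Editorial note, not part of the commit: a short illustration of querying the CPU-feature probes typed above; these are private bindings and may change between releases.

import torch
from torch._C import _cpu

print("AVX2 supported:", _cpu._is_avx2_supported())
print("AVX-512 supported:", _cpu._is_avx512_supported())
print("AMX tile supported:", _cpu._is_amx_tile_supported())
print("L2 cache size:", _cpu._L2_cache_size())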
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cudnn.pyi ADDED
@@ -0,0 +1,17 @@
+ from enum import Enum
+
+ from torch.types import _bool, Tuple
+
+ # Defined in torch/csrc/cuda/shared/cudnn.cpp
+ is_cuda: _bool
+
+ def getRuntimeVersion() -> Tuple[int, int, int]: ...
+ def getCompileVersion() -> Tuple[int, int, int]: ...
+ def getVersionInt() -> int: ...
+
+ class RNNMode(int, Enum):
+     value: int
+     rnn_relu = ...
+     rnn_tanh = ...
+     lstm = ...
+     gru = ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_cusparselt.pyi ADDED
@@ -0,0 +1 @@
+ def getVersionInt() -> int: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi ADDED
@@ -0,0 +1,27 @@
+ # mypy: allow-untyped-defs
+ from typing import Any
+
+ import torch
+
+ # This module is defined in torch/csrc/distributed/autograd/init.cpp
+
+ class DistAutogradContext:
+     def _context_id(self) -> int: ...
+     def _recv_functions(self) -> dict[int, Any]: ...
+     def _send_functions(self) -> dict[int, Any]: ...
+     def _known_worker_ids(self) -> set[int]: ...
+
+ def _new_context() -> DistAutogradContext: ...
+ def _release_context(context_id: int) -> None: ...
+ def _get_max_id() -> int: ...
+ def _is_valid_context(worker_id: int) -> bool: ...
+ def _retrieve_context(context_id: int) -> DistAutogradContext: ...
+ def _current_context() -> DistAutogradContext: ...
+ def _init(worker_id: int) -> None: ...
+ def _get_debug_info() -> dict[str, str]: ...
+ def backward(
+     context_id: int,
+     roots: list[torch.Tensor],
+     retain_graph=False,
+ ) -> None: ...
+ def get_gradients(context_id: int) -> dict[torch.Tensor, torch.Tensor]: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi ADDED
@@ -0,0 +1,699 @@
1
+ # mypy: allow-untyped-defs
2
+ # mypy: disable-error-code="type-arg"
3
+ from datetime import timedelta
4
+ from enum import Enum
5
+ from typing import Any, Optional, overload
6
+
7
+ import torch
8
+ from torch import Tensor
9
+ from torch._C import ScriptObject
10
+ from torch.futures import Future
11
+
12
+ # This module is defined in torch/csrc/distributed/c10d/init.cpp
13
+
14
+ _DEFAULT_FIRST_BUCKET_BYTES: int
15
+ _DEFAULT_NO_TIMEOUT: timedelta
16
+ _DEFAULT_PG_TIMEOUT: timedelta
17
+ _DEFAULT_PG_NCCL_TIMEOUT: timedelta
18
+
19
+ class BuiltinCommHookType(Enum):
20
+ ALLREDUCE = ...
21
+ FP16_COMPRESS = ...
22
+
23
+ def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
24
+ def _register_builtin_comm_hook(
25
+ reducer: Reducer,
26
+ comm_hook_type: BuiltinCommHookType,
27
+ ): ...
28
+ def _set_global_rank(rank: int) -> None: ...
29
+ def _hash_tensors(tensors: list[Tensor]) -> int: ...
30
+
31
+ class GradBucket:
32
+ def index(self) -> int: ...
33
+ def buffer(self) -> Tensor: ...
34
+ def gradients(self) -> list[Tensor]: ...
35
+ def is_last(self) -> bool: ...
36
+ def set_buffer(self, tensor: Tensor) -> None: ...
37
+ def parameters(self) -> list[Tensor]: ...
38
+
39
+ class Reducer:
40
+ def __init__(
41
+ self,
42
+ params: list[Tensor],
43
+ bucket_indices: list[list[int]],
44
+ per_bucket_size_limits: list[int],
45
+ process_group: ProcessGroup,
46
+ expect_sparse_gradients: list[bool] = ...,
47
+ bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp
48
+ find_unused_parameters: bool = ...,
49
+ gradient_as_bucket_view: bool = ...,
50
+ param_to_name_mapping: dict[int, str] = ...,
51
+ first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp
52
+ ) -> None: ...
53
+ def prepare_for_forward(self) -> None: ...
54
+ def prepare_for_backward(self, output: list[Tensor]) -> None: ...
55
+ def get_backward_stats(self) -> list[int]: ...
56
+ def _install_post_backward_futures(self, futures: list[Future]) -> None: ...
57
+ def _rebuild_buckets(self) -> bool: ...
58
+ def _get_zeros_like_grad_buckets(self) -> list[GradBucket]: ...
59
+ def _push_all_rebuilt_params(self) -> None: ...
60
+ def _set_forward_pass_work_handle(
61
+ self,
62
+ work: Work,
63
+ use_static_world_size: bool,
64
+ ): ...
65
+ def _get_local_used_map(self) -> Tensor: ...
66
+ def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ...
67
+ def _set_static_graph(self) -> None: ...
68
+ def _run_comm_hook(self, bucket: GradBucket) -> Future: ...
69
+ def set_logger(self, logger: Logger) -> None: ...
70
+ def _remove_autograd_hooks(self) -> None: ...
71
+ def _check_reducer_finalized(self) -> None: ...
72
+ def _set_sparse_metadata(self, global_unique_ids: dict[str, Tensor]) -> None: ...
73
+ def _reset_state(self) -> None: ...
74
+ def _update_process_group(self, new_process_group: ProcessGroup) -> None: ...
75
+
76
+ class DDPLoggingData:
77
+ strs_map: dict[str, str]
78
+ ints_map: dict[str, int]
79
+
80
+ class Logger:
81
+ def __init__(self, reducer: Reducer) -> None: ...
82
+ def set_construction_data_and_log(
83
+ self,
84
+ module_name: str,
85
+ device_ids: list[int],
86
+ output_device: int,
87
+ broadcast_buffers: bool,
88
+ has_sync_bn: bool,
89
+ static_graph: bool,
90
+ ): ...
91
+ def set_runtime_stats_and_log(self) -> None: ...
92
+ def set_error_and_log(self, error: str) -> None: ...
93
+ def _get_ddp_logging_data(self) -> DDPLoggingData: ...
94
+ def _set_comm_hook_name(self, comm_hook: str) -> None: ...
95
+ def _set_uneven_input_join(self) -> None: ...
96
+ def _set_static_graph(self) -> None: ...
97
+
98
+ class _WorkerServer:
99
+ def __init__(self, socket_path: str) -> None: ...
100
+ def shutdown(self) -> None: ...
101
+
102
+ def get_debug_level(): ...
103
+ def set_debug_level(): ...
104
+ def set_debug_level_from_env(): ...
105
+
106
+ class DebugLevel(Enum):
107
+ OFF = ...
108
+ INFO = ...
109
+ DETAIL = ...
110
+
111
+ class ReduceOp:
112
+ def __init__(self, op: RedOpType) -> None: ...
113
+
114
+ SUM: RedOpType = ...
115
+ AVG: RedOpType = ...
116
+ PRODUCT: RedOpType = ...
117
+ MIN: RedOpType = ...
118
+ MAX: RedOpType = ...
119
+ BAND: RedOpType = ...
120
+ BOR: RedOpType = ...
121
+ BXOR: RedOpType = ...
122
+ PREMUL_SUM: RedOpType = ...
123
+ UNUSED: RedOpType = ...
124
+
125
+ class RedOpType(Enum): ...
126
+
127
+ class BroadcastOptions:
128
+ rootRank: int
129
+ rootTensor: int
130
+ timeout: timedelta
131
+ asyncOp: bool
132
+
133
+ class AllreduceOptions:
134
+ reduceOp: ReduceOp
135
+ timeout: timedelta
136
+
137
+ class AllreduceCoalescedOptions(AllreduceOptions): ...
138
+
139
+ class ReduceOptions:
140
+ reduceOp: ReduceOp
141
+ rootRank: int
142
+ rootTensor: int
143
+ timeout: timedelta
144
+
145
+ class AllgatherOptions:
146
+ timeout: timedelta
147
+ asyncOp: bool
148
+
149
+ class GatherOptions:
150
+ rootRank: int
151
+ timeout: timedelta
152
+
153
+ class ScatterOptions:
154
+ rootRank: int
155
+ timeout: timedelta
156
+ asyncOp: bool
157
+
158
+ class ReduceScatterOptions:
159
+ reduceOp: ReduceOp
160
+ timeout: timedelta
161
+ asyncOp: bool
162
+
163
+ class BarrierOptions:
164
+ device_ids: list[int]
165
+ device: torch.device
166
+ timeout: timedelta
167
+
168
+ class AllToAllOptions:
169
+ timeout: timedelta
170
+
171
+ class Store:
172
+ def set(self, key: str, value: str): ...
173
+ def get(self, key: str) -> bytes: ...
174
+ def add(self, key: str, value: int) -> int: ...
175
+ def compare_set(
176
+ self,
177
+ key: str,
178
+ expected_value: str,
179
+ desired_value: str,
180
+ ) -> bytes: ...
181
+ def delete_key(self, key: str) -> bool: ...
182
+ def num_keys(self) -> int: ...
183
+ def set_timeout(self, timeout: timedelta): ...
184
+ @overload
185
+ def wait(self, keys: list[str]): ...
186
+ @overload
187
+ def wait(self, keys: list[str], timeout: timedelta): ...
188
+
189
+ class FileStore(Store):
190
+ def __init__(self, path: str, numWorkers: int = ...) -> None: ...
191
+
192
+ class HashStore(Store):
193
+ def __init__(self) -> None: ...
194
+
195
+ class TCPStore(Store):
196
+ def __init__(
197
+ self,
198
+ host_name: str,
199
+ port: int,
200
+ world_size: int | None = ...,
201
+ is_master: bool = ...,
202
+ timeout: timedelta = ...,
203
+ wait_for_workers: bool = ...,
204
+ multi_tenant: bool = ...,
205
+ master_listen_fd: int | None = ...,
206
+ use_libuv: bool | None = ...,
207
+ ) -> None: ...
208
+ @property
209
+ def host(self) -> str: ...
210
+ @property
211
+ def port(self) -> int: ...
212
+
213
+ class PrefixStore(Store):
214
+ def __init__(self, prefix: str, store: Store) -> None: ...
215
+ @property
216
+ def underlying_store(self) -> Store: ...
217
+
218
+ class _ControlCollectives:
219
+ def barrier(self, key: str, timeout: timedelta, blocking: bool) -> None: ...
220
+ def broadcast_send(self, key: str, data: str, timeout: timedelta) -> None: ...
221
+ def broadcast_recv(self, key: str, timeout: timedelta) -> str: ...
222
+ def gather_send(self, key: str, data: str, timeout: timedelta) -> None: ...
223
+ def gather_recv(self, key: str, timeout: timedelta) -> str: ...
224
+ def scatter_send(self, key: str, data: str, timeout: timedelta) -> None: ...
225
+ def scatter_recv(self, key: str, timeout: timedelta) -> str: ...
226
+ def all_gather(self, key: str, data: str, timeout: timedelta) -> str: ...
227
+ def all_sum(self, key: str, data: int, timeout: timedelta) -> int: ...
228
+
229
+ class _StoreCollectives(_ControlCollectives):
230
+ def __init__(self, store: Store, rank: int, world_size: int) -> None: ...
231
+
232
+ class _DistributedBackendOptions:
233
+ def __init__(self) -> None: ...
234
+ @property
235
+ def store(self) -> Store: ...
236
+ @store.setter
237
+ def store(self, store: Store) -> None: ...
238
+ @property
239
+ def group_rank(self) -> int: ...
240
+ @group_rank.setter
241
+ def group_rank(self, rank: int) -> None: ...
242
+ @property
243
+ def group_size(self) -> int: ...
244
+ @group_size.setter
245
+ def group_size(self, size: int) -> None: ...
246
+ @property
247
+ def timeout(self) -> timedelta: ...
248
+ @timeout.setter
249
+ def timeout(self, timeout: timedelta) -> None: ...
250
+ @property
251
+ def group_id(self) -> str: ...
252
+ @group_id.setter
253
+ def group_id(self, group_id: str) -> None: ...
254
+ @property
255
+ def global_ranks_in_group(self) -> list[int]: ...
256
+ @global_ranks_in_group.setter
257
+ def global_ranks_in_group(self, ranks: list[int]) -> None: ...
258
+
259
+ class Work:
260
+ def is_completed(self) -> bool: ...
261
+ def is_success(self) -> bool: ...
262
+ def exception(self) -> Any: ...
263
+ def wait(self, timeout: timedelta = ...) -> bool: ...
264
+ def get_future(self) -> Future: ...
265
+ def source_rank(self) -> int: ...
266
+ def _source_rank(self) -> int: ...
267
+ def result(self) -> list[Tensor]: ...
268
+ def synchronize(self): ...
269
+ def boxed(self) -> ScriptObject: ...
270
+ @staticmethod
271
+ def unbox(obj: ScriptObject) -> Work: ...
272
+
273
+ class Backend:
274
+ class Options:
275
+ def __init__(self, backend: str, timeout: timedelta = ...) -> None: ...
276
+ @property
277
+ def backend(self) -> str: ...
278
+ @property
279
+ def _timeout(self) -> timedelta: ...
280
+ @_timeout.setter
281
+ def _timeout(self, val: timedelta) -> None: ...
282
+
283
+ def __init__(
284
+ self,
285
+ rank: int,
286
+ size: int,
287
+ ) -> None: ...
288
+ @property
289
+ def supports_splitting(self) -> bool: ...
290
+ @property
291
+ def options(self) -> Options: ...
292
+ def rank(self) -> int: ...
293
+ def size(self) -> int: ...
294
+ def eager_connect_single_device(self, device: torch.device | None) -> None: ...
295
+ def _set_sequence_number_for_group(self) -> None: ...
296
+ def _set_default_timeout(self, timeout: timedelta) -> None: ...
297
+
298
+ class ProcessGroup:
299
+ class Options:
300
+ def __init__(self, backend: str, timeout: timedelta = ...) -> None: ...
301
+ @property
302
+ def backend(self) -> str: ...
303
+ @property
304
+ def _timeout(self) -> timedelta: ...
305
+ @_timeout.setter
306
+ def _timeout(self, val: timedelta) -> None: ...
307
+
308
+ class BackendType(Enum):
309
+ UNDEFINED = ...
310
+ GLOO = ...
311
+ NCCL = ...
312
+ UCC = ...
313
+ MPI = ...
314
+ CUSTOM = ...
315
+
316
+ def __init__(
317
+ self,
318
+ store: Store,
319
+ rank: int,
320
+ size: int,
321
+ options: Options,
322
+ ) -> None: ...
323
+ def rank(self) -> int: ...
324
+ def size(self) -> int: ...
325
+ @overload
326
+ def broadcast(
327
+ self,
328
+ tensors: list[Tensor],
329
+ opts=...,
330
+ ) -> Work: ...
331
+ @overload
332
+ def broadcast(
333
+ self,
334
+ tensor: Tensor,
335
+ root: int,
336
+ ) -> Work: ...
337
+ @overload
338
+ def allreduce(
339
+ self,
340
+ tensors: list[Tensor],
341
+ opts: AllreduceOptions = ...,
342
+ ) -> Work: ...
343
+ @overload
344
+ def allreduce(
345
+ self,
346
+ tensors: list[Tensor],
347
+ op=...,
348
+ ) -> Work: ...
349
+ @overload
350
+ def allreduce(
351
+ self,
352
+ tensor: Tensor,
353
+ op=...,
354
+ ) -> Work: ...
355
+ def allreduce_coalesced(
356
+ self,
357
+ tensors: list[Tensor],
358
+ opts=...,
359
+ ) -> Work: ...
360
+ def reduce_scatter_tensor_coalesced(
361
+ self,
362
+ outputTensors: list[Tensor],
363
+ inputTensors: list[Tensor],
364
+ opts: ReduceScatterOptions | None = None,
365
+ ) -> Work: ...
366
+ @overload
367
+ def reduce(
368
+ self,
369
+ tensors: list[Tensor],
370
+ opts=...,
371
+ ) -> Work: ...
372
+ @overload
373
+ def reduce(
374
+ self,
375
+ tensor: Tensor,
376
+ root: int,
377
+ op=...,
378
+ ) -> Work: ...
379
+ @overload
380
+ def allgather(
381
+ self,
382
+ output_tensors: list[list[Tensor]],
383
+ input_tensors: list[Tensor],
384
+ opts=...,
385
+ ) -> Work: ...
386
+ @overload
387
+ def allgather(
388
+ self,
389
+ output_tensors: list[Tensor],
390
+ input_tensor: Tensor,
391
+ ) -> Work: ...
392
+ def _allgather_base(
393
+ self,
394
+ output: Tensor,
395
+ input: Tensor,
396
+ opts=...,
397
+ ) -> Work: ...
398
+ def allgather_coalesced(
399
+ self,
400
+ output_lists: list[list[Tensor]],
401
+ input_list: list[Tensor],
402
+ opts=...,
403
+ ) -> Work: ...
404
+ def allgather_into_tensor_coalesced(
405
+ self,
406
+ output_lists: list[Tensor],
407
+ input_list: list[Tensor],
408
+ opts=...,
409
+ ) -> Work: ...
410
+ @overload
411
+ def gather(
412
+ self,
413
+ output_tensors: list[list[Tensor]],
414
+ input_tensors: list[Tensor],
415
+ opts=...,
416
+ ) -> Work: ...
417
+ @overload
418
+ def gather(
419
+ self,
420
+ output_tensors: list[Tensor],
421
+ input_tensor: Tensor,
422
+ root: int,
423
+ ) -> Work: ...
424
+ @overload
425
+ def scatter(
426
+ self,
427
+ output_tensors: list[Tensor],
428
+ input_tensors: list[list[Tensor]],
429
+ opts=...,
430
+ ) -> Work: ...
431
+ @overload
432
+ def scatter(
433
+ self,
434
+ output_tensor: Tensor,
435
+ input_tensors: list[Tensor],
436
+ root: int,
437
+ ) -> Work: ...
438
+ @overload
439
+ def reduce_scatter(
440
+ self,
441
+ output_tensors: list[Tensor],
442
+ input_tensors: list[list[Tensor]],
443
+ opts=...,
444
+ ) -> Work: ...
445
+ @overload
446
+ def reduce_scatter(
447
+ self,
448
+ output_tensors: Tensor,
449
+ input_tensor: list[Tensor],
450
+ ) -> Work: ...
451
+ def _reduce_scatter_base(
452
+ self,
453
+ outputTensor: Tensor,
454
+ inputTensor: Tensor,
455
+ opts: ReduceScatterOptions | None,
456
+ ) -> Work: ...
457
+ @overload
458
+ def alltoall_base(
459
+ self,
460
+ output_tensor: Tensor,
461
+ input_tensor: Tensor,
462
+ output_split_sizes: list[int],
463
+ input_split_sizes: list[int],
464
+ opts=...,
465
+ ) -> Work: ...
466
+ @overload
467
+ def alltoall_base(
468
+ self,
469
+ output: Tensor,
470
+ input: Tensor,
471
+ output_split_sizes: list[int],
472
+ input_split_sizes: list[int],
473
+ ) -> Work: ...
474
+ @overload
475
+ def alltoall(
476
+ self,
477
+ output_tensor: list[Tensor],
478
+ input_tensor: list[Tensor],
479
+ opts=...,
480
+ ) -> Work: ...
481
+ @overload
482
+ def alltoall(
483
+ self,
484
+ output: list[Tensor],
485
+ input: list[Tensor],
486
+ ) -> Work: ...
487
+ def send(
488
+ self,
489
+ tensors: list[Tensor],
490
+ dstRank: int,
491
+ tag: int,
492
+ ) -> Work: ...
493
+ def recv(
494
+ self,
495
+ tensors: list[Tensor],
496
+ srcRank: int,
497
+ tag: int,
498
+ ) -> Work: ...
499
+ def recv_anysource(self, tensors: list[Tensor], tag: int) -> Work: ...
500
+ def barrier(self, opts=...) -> Work: ...
501
+ def boxed(self) -> ScriptObject: ...
502
+ @staticmethod
503
+ def unbox(obj: ScriptObject) -> ProcessGroup: ...
504
+ def _start_coalescing(self, device: torch.device) -> None: ...
505
+ def _end_coalescing(self, device: torch.device) -> Work: ...
506
+ def _get_backend_name(self) -> str: ...
507
+ def _backend_id(self, backend_type: BackendType) -> int: ...
508
+ @property
509
+ def _device_types(self) -> list[torch.device]: ...
510
+ def _get_backend(self, device: torch.device) -> Backend: ...
511
+ def _register_backend(
512
+ self,
513
+ device: torch.device,
514
+ backend_type: BackendType,
515
+ backend: Backend | None,
516
+ ) -> None: ...
517
+ def _set_group_name(self, name: str) -> None: ...
518
+ def _set_group_desc(self, desc: str) -> None: ...
519
+ def name(self) -> str: ...
520
+ def _has_hooks(self) -> bool: ...
521
+ def _wait_for_pending_works(self) -> None: ...
522
+ def _set_sequence_number_for_group(self) -> None: ...
523
+ @property
524
+ def bound_device_id(self) -> torch.device | None: ...
525
+ @bound_device_id.setter
526
+ def bound_device_id(self, device: torch.device | None) -> None: ...
527
+ @property
528
+ def group_name(self) -> str: ...
529
+ @property
530
+ def group_desc(self) -> str: ...
531
+
532
+ class ProcessGroupGloo(Backend):
533
+ class Device: ...
534
+
535
+ class Options(ProcessGroup.Options):
536
+ devices: list[ProcessGroupGloo.Device]
537
+ threads: int
538
+
539
+ def __init__(self): ...
540
+
541
+ def __init__(
542
+ self,
543
+ store: Store,
544
+ rank: int,
545
+ size: int,
546
+ timeout: timedelta,
547
+ ) -> None: ...
548
+ @staticmethod
549
+ def create_device(hostname="", interface="") -> Device: ...
550
+ @staticmethod
551
+ def create_default_device() -> Device: ...
552
+ def _set_default_timeout(self, timeout) -> None: ...
553
+
554
+ class _ProcessGroupWrapper(Backend):
555
+ def __init__(self, pg: Backend, gloo_pg: ProcessGroupGloo) -> None: ...
556
+ wrapped_pg: Backend
557
+
558
+ class ProcessGroupNCCL(Backend):
559
+ class NCCLConfig:
560
+ blocking: int
561
+ cga_cluster_size: int
562
+ min_ctas: int
563
+ max_ctas: int
564
+
565
+ class Options(ProcessGroup.Options):
566
+ config: ProcessGroupNCCL.NCCLConfig
567
+ is_high_priority_stream: bool
568
+ split_from: ProcessGroupNCCL
569
+ split_color: int
570
+ global_ranks_in_group: list[int]
571
+ group_name: str
572
+
573
+ def __init__(self, is_high_priority_stream: bool = False): ...
574
+
575
+ def __init__(
576
+ self,
577
+ store: Store,
578
+ rank: int,
579
+ size: int,
580
+ options: Options,
581
+ ) -> None: ...
582
+ def _group_start(self) -> None: ...
583
+ def _group_end(self) -> None: ...
584
+ def _set_default_timeout(self, timeout) -> None: ...
585
+ def _shutdown(self) -> None: ...
586
+ def perform_nocolor_split(self, device: torch.device) -> None: ...
587
+ def comm_split_count(self) -> int: ...
588
+ def _add_ephemeral_timeout(self, timeout: timedelta) -> None: ...
589
+ @property
590
+ def uid(self) -> int: ...
591
+ @property
592
+ def options(self) -> Options: ... # type: ignore[override]
593
+
594
+ class ProcessGroupUCC(Backend):
595
+ def __init__(
596
+ self,
597
+ store: Store,
598
+ rank: int,
599
+ size: int,
600
+ timeout: timedelta,
601
+ ) -> None: ...
602
+
603
+ class ProcessGroupMPI(Backend):
604
+ def __init__(
605
+ self,
606
+ rank: int,
607
+ size: int,
608
+ pgComm: int,
609
+ ) -> None: ...
610
+ @staticmethod
611
+ def create(ranks: list[int]) -> ProcessGroupMPI: ...
612
+
613
+ def _compute_bucket_assignment_by_size(
614
+ tensors: list[Tensor],
615
+ bucket_size_limits: list[int],
616
+ expect_sparse_gradient: list[bool] = ...,
617
+ tensor_indices: list[int] = ...,
618
+ ) -> tuple[list[list[int]], list[int]]: ...
619
+ def _broadcast_coalesced(
620
+ process_group: ProcessGroup,
621
+ tensors: list[Tensor],
622
+ buffer_size: int,
623
+ src: int,
624
+ ): ...
625
+ def _test_python_store(store: Store): ...
626
+ def _verify_params_across_processes(
627
+ process_group: ProcessGroup,
628
+ params: list[Tensor],
629
+ logger: Logger | None,
630
+ ): ...
631
+ def _make_nccl_premul_sum(factor: float | list[Tensor]) -> ReduceOp: ...
632
+ def _register_process_group(
633
+ group_name: str,
634
+ process_group: ProcessGroup,
635
+ ) -> None: ...
636
+ def _resolve_process_group(group_name: str) -> ProcessGroup: ...
637
+ def _register_work(tensor: torch.Tensor, work: Work) -> ProcessGroup: ...
638
+ def _unregister_all_process_groups() -> None: ...
639
+ def _unregister_process_group(group_name: str) -> None: ...
640
+
641
+ class _SymmetricMemory:
642
+ @staticmethod
643
+ def set_group_info(
644
+ group_name: str,
645
+ rank: int,
646
+ world_size: int,
647
+ store: Store,
648
+ ) -> None: ...
649
+ @staticmethod
650
+ def empty_strided_p2p(
651
+ size: torch.types._size,
652
+ stride: torch.types._size,
653
+ dtype: torch.dtype,
654
+ device: torch.device,
655
+ group_name: str,
656
+ ) -> torch.Tensor: ...
657
+ @property
658
+ def rank(self) -> int: ...
659
+ @property
660
+ def world_size(self) -> int: ...
661
+ @staticmethod
662
+ def rendezvous(tensor: torch.Tensor) -> _SymmetricMemory: ...
663
+ def get_buffer(
664
+ self,
665
+ rank: int,
666
+ sizes: torch.types._size,
667
+ dtype: torch.dtype,
668
+ storage_offset: int | None = 0,
669
+ ) -> torch.Tensor: ...
670
+ def barrier(self, channel: int = 0) -> None: ...
671
+ def put_signal(self, dst_rank: int, channel: int = 0) -> None: ...
672
+ def wait_signal(self, src_rank: int, channel: int = 0) -> None: ...
673
+
674
+ class ProcessGroupCudaP2P(Backend):
675
+ class Options:
676
+ nccl_options: Optional[ProcessGroupNCCL.Options]
677
+ buffer_size: Optional[int]
678
+
679
+ def __init__(self) -> None: ...
680
+
681
+ def __init__(
682
+ self,
683
+ store: Store,
684
+ rank: int,
685
+ size: int,
686
+ options: ProcessGroupCudaP2P.Options,
687
+ ) -> None: ...
688
+ def is_p2p_available(self) -> bool: ...
689
+ def get_buffer_size(self) -> int: ...
690
+ def stream(self) -> torch.cuda.Stream: ...
691
+ def intra_node_barrier(self) -> Work: ...
692
+ def get_p2p_buffer(
693
+ self,
694
+ rank: int,
695
+ sizes: torch.Size,
696
+ dtype: torch.dtype,
697
+ storage_offset: Optional[int] = 0,
698
+ ) -> torch.Tensor: ...
699
+ def _shutdown(self) -> None: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi ADDED
@@ -0,0 +1,188 @@
+ # mypy: allow-untyped-defs
+ # mypy: disable-error-code="type-arg"
+ from datetime import timedelta
+ from typing import Any, Generic, overload, TypeVar
+
+ import torch
+ from torch._C import Future
+ from torch._C._autograd import ProfilerEvent
+ from torch._C._distributed_c10d import Store
+ from torch._C._profiler import ProfilerConfig
+
+ # This module is defined in torch/csrc/distributed/rpc/init.cpp
+
+ _DEFAULT_INIT_METHOD: str
+ _DEFAULT_NUM_WORKER_THREADS: int
+ _UNSET_RPC_TIMEOUT: float
+ _DEFAULT_RPC_TIMEOUT_SEC: float
+
+ _T = TypeVar("_T")
+
+ class RpcBackendOptions:
+     rpc_timeout: float
+     init_method: str
+     def __init__(
+         self,
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+     ) -> None: ...
+
+ class WorkerInfo:
+     def __init__(self, name: str, worker_id: int) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def id(self) -> int: ...
+     def __eq__(self, other: object) -> bool: ...
+
+ class RpcAgent:
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def sync(self): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     def get_worker_infos(self) -> list[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> dict[torch.device, torch.device]: ...
+     def get_debug_info(self) -> dict[str, str]: ...
+     def get_metrics(self) -> dict[str, str]: ...
+
+ class PyRRef(Generic[_T]):
+     def __init__(self, value: _T, type_hint: Any = None) -> None: ...
+     def is_owner(self) -> bool: ...
+     def confirmed_by_owner(self) -> bool: ...
+     def owner(self) -> WorkerInfo: ...
+     def owner_name(self) -> str: ...
+     def to_here(self, timeout: float = ...) -> _T: ...
+     def local_value(self) -> Any: ...
+     def rpc_sync(self, timeout: float = ...) -> Any: ...
+     def rpc_async(self, timeout: float = ...) -> Any: ...
+     def remote(self, timeout: float = ...) -> Any: ...
+     def _serialize(self) -> tuple: ...
+     @staticmethod
+     def _deserialize(tp: tuple) -> PyRRef: ...
+     def _get_type(self) -> type[_T]: ...
+     def _get_future(self) -> Future[_T]: ...
+     def _get_profiling_future(self) -> Future[_T]: ...
+     def _set_profiling_future(self, profilingFuture: Future[_T]): ...
+
+ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions):
+     num_worker_threads: int
+     device_maps: dict[str, dict[torch.device, torch.device]]
+     devices: list[torch.device]
+     def __init__(
+         self,
+         num_worker_threads: int,
+         _transports: list | None,
+         _channels: list | None,
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+         device_maps: dict[str, dict[torch.device, torch.device]] = {},  # noqa: B006
+         devices: list[torch.device] = [],  # noqa: B006
+     ) -> None: ...
+     def _set_device_map(
+         self,
+         to: str,
+         device_map: dict[torch.device, torch.device],
+     ): ...
+
+ class TensorPipeAgent(RpcAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         worker_id: int,
+         world_size: int | None,
+         opts: _TensorPipeRpcBackendOptionsBase,
+         reverse_device_maps: dict[str, dict[torch.device, torch.device]],
+         devices: list[torch.device],
+     ) -> None: ...
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, id: int) -> WorkerInfo: ...
+     def get_worker_infos(self) -> list[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> dict[torch.device, torch.device]: ...
+     def _update_group_membership(
+         self,
+         worker_info: WorkerInfo,
+         my_devices: list[torch.device],
+         reverse_device_map: dict[str, dict[torch.device, torch.device]],
+         is_join: bool,
+     ): ...
+     def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ...
+     @property
+     def is_static_group(self) -> bool: ...
+     @property
+     def store(self) -> Store: ...
+
+ def _is_current_rpc_agent_set() -> bool: ...
+ def _get_current_rpc_agent() -> RpcAgent: ...
+ def _set_and_start_rpc_agent(agent: RpcAgent): ...
+ def _reset_current_rpc_agent(): ...
+ def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ...
+ def _destroy_rref_context(ignoreRRefLeak: bool): ...
+ def _rref_context_get_debug_info() -> dict[str, str]: ...
+ def _cleanup_python_rpc_handler(): ...
+ def _invoke_rpc_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_rpc_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: list[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_rpc_torchscript(
+     dstWorkerName: str,
+     qualifiedNameStr: str,
+     argsTuple: tuple,
+     kwargsDict: dict,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_remote_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: list[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_torchscript(
+     dstWorkerName: WorkerInfo,
+     qualifiedNameStr: str,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def get_rpc_timeout() -> float: ...
+ def enable_gil_profiling(flag: bool): ...
+ def _set_rpc_timeout(rpcTimeoutSeconds: float): ...
+
+ class RemoteProfilerManager:
+     @staticmethod
+     def set_current_profiling_key(key: str): ...
+
+ def _enable_server_process_global_profiler(new_config: ProfilerConfig): ...
+ def _disable_server_process_global_profiler() -> list[list[list[ProfilerEvent]]]: ...
+ def _set_profiler_node_id(default_node_id: int): ...
+ def _enable_jit_rref_pickle(): ...
+ def _disable_jit_rref_pickle(): ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi ADDED
@@ -0,0 +1,32 @@
+ import torch
+ from torch._C._distributed_c10d import Store
+ from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent
+
+ # This module is defined in torch/csrc/distributed/rpc/testing/init.cpp
+
+ class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
+     def __init__(
+         self,
+         num_worker_threads: int,
+         rpc_timeout: float,
+         init_method: str,
+         messages_to_fail: list[str],
+         messages_to_delay: dict[str, float],
+         num_fail_sends: int,
+     ) -> None: ...
+     num_send_recv_threads: int
+     messages_to_fail: list[str]
+     messages_to_delay: dict[str, float]
+     num_fail_sends: int
+
+ class FaultyTensorPipeAgent(TensorPipeAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         rank: int,
+         world_size: int,
+         options: FaultyTensorPipeRpcBackendOptions,
+         reverse_device_maps: dict[str, dict[torch.device, torch.device]],
+         devices: list[torch.device],
+     ) -> None: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_functions.pyi ADDED
@@ -0,0 +1,11 @@
+ from typing import AnyStr
+
+ from torch import Tensor
+
+ class UndefinedGrad:
+     def __init__(self) -> None: ...
+     def __call__(self, *inputs: Tensor) -> list[Tensor]: ...
+
+ class DelayedError:
+     def __init__(self, msg: AnyStr, num_inputs: int) -> None: ...
+     def __call__(self, inputs: list[Tensor]) -> list[Tensor]: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_functorch.pyi ADDED
@@ -0,0 +1,83 @@
+ # mypy: allow-untyped-defs
+ from enum import Enum
+
+ from torch import Tensor
+
+ # Defined in torch/csrc/functorch/init.cpp
+
+ def _set_dynamic_layer_keys_included(included: bool) -> None: ...
+ def get_unwrapped(tensor: Tensor) -> Tensor: ...
+ def is_batchedtensor(tensor: Tensor) -> bool: ...
+ def is_functionaltensor(tensor: Tensor) -> bool: ...
+ def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
+ def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
+ def is_legacy_batchedtensor(tensor: Tensor) -> bool: ...
+ def maybe_get_bdim(tensor: Tensor) -> int: ...
+ def maybe_get_level(tensor: Tensor) -> int: ...
+ def maybe_current_level() -> int | None: ...
+ def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
+ def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
+ def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
+ def _unwrap_batched(tensor: Tensor, level: int) -> tuple[Tensor, int | None]: ...
+ def current_level() -> int: ...
+ def count_jvp_interpreters() -> int: ...
+ def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
+ def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
+ def get_single_level_autograd_function_allowed() -> bool: ...
+ def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
+ def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
+ def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ...
+ def _vmap_decrement_nesting() -> int: ...
+ def _grad_increment_nesting() -> int: ...
+ def _grad_decrement_nesting() -> int: ...
+ def _jvp_increment_nesting() -> int: ...
+ def _jvp_decrement_nesting() -> int: ...
+
+ # Defined in aten/src/ATen/functorch/Interpreter.h
+ class TransformType(Enum):
+     Torch: TransformType = ...
+     Vmap: TransformType = ...
+     Grad: TransformType = ...
+     Jvp: TransformType = ...
+     Functionalize: TransformType = ...
+
+ class RandomnessType(Enum):
+     Error: TransformType = ...
+     Same: TransformType = ...
+     Different: TransformType = ...
+
+ class CInterpreter:
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+
+ class CGradInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter) -> None: ...
+     def lift(self, Tensor) -> Tensor: ...
+     def prevGradMode(self) -> bool: ...
+
+ class CJvpInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter) -> None: ...
+     def lift(self, Tensor) -> Tensor: ...
+     def prevFwdGradMode(self) -> bool: ...
+
+ class CFunctionalizeInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter) -> None: ...
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+     def functionalizeAddBackViews(self) -> bool: ...
+
+ class CVmapInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter) -> None: ...
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+     def batchSize(self) -> int: ...
+     def randomness(self) -> RandomnessType: ...
+
+ class DynamicLayer: ...
+
+ def get_dynamic_layer_stack_depth() -> int: ...
+ def get_interpreter_stack() -> list[CInterpreter]: ...
+ def peek_interpreter_stack() -> CInterpreter: ...
+ def pop_dynamic_layer_stack() -> DynamicLayer: ...
+ def pop_dynamic_layer_stack_and_undo_to_depth(int) -> None: ...
+ def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_instruction_counter.pyi ADDED
@@ -0,0 +1,4 @@
+ # Defined in torch/csrc/instruction_counter/Module.cpp
+
+ def start() -> int: ...
+ def end(id: int) -> int: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_itt.pyi ADDED
@@ -0,0 +1,5 @@
+ # Defined in torch/csrc/itt.cpp
+ def is_available() -> None: ...
+ def rangePush(message: str) -> None: ...
+ def rangePop() -> None: ...
+ def mark(message: str) -> None: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_lazy.pyi ADDED
@@ -0,0 +1,27 @@
+ # mypy: allow-untyped-defs
+ from torch import Tensor
+
+ # defined in torch/csrc/lazy/python/init.cpp
+ def _mark_step(device: str, devices: list[str], wait: bool): ...
+ def _wait_device_ops(devices: list[str]): ...
+ def _reset_metrics(): ...
+ def _counter_names() -> list[str]: ...
+ def _counter_value(name: str) -> int: ...
+ def _metrics_report() -> str: ...
+ def _get_graph_hash(tensors: list[Tensor]) -> str: ...
+ def _sync_multi(
+     tensors: list[Tensor],
+     devices: list[str],
+     wait: bool = True,
+     sync_ltc_data: bool = True,
+ ): ...
+ def _get_tensor_id(tensor: Tensor) -> int: ...
+ def _get_tensors_text(tensors: list[Tensor]) -> str: ...
+ def _get_tensors_dot(tensors: list[Tensor]) -> str: ...
+ def _get_tensors_backend(tensors: list[Tensor]) -> str: ...
+ def _get_force_fallback() -> str: ...
+ def _set_force_fallback(newval: str): ...
+ def _clear_ir_cache(): ...
+ def _dump_ir_cache(filename: str): ...
+ def _set_reuse_ir(val: bool): ...
+ def _get_default_device_type(): ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi ADDED
@@ -0,0 +1,12 @@
+ # mypy: allow-untyped-defs
+ # defined in torch/csrc/lazy/python/init.cpp
+
+ from typing import Any
+
+ from torch import Tensor
+
+ def _init(): ...
+ def _get_tensors_ts_device_data_node(
+     tensors: list[Tensor],
+ ) -> tuple[list[int], list[Any]]: ...
+ def _run_cached_graph(hash_str: str, graph_inputs: list[Any]) -> list[Tensor]: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_monitor.pyi ADDED
@@ -0,0 +1,44 @@
+ # Defined in torch/csrc/monitor/python_init.cpp
+
+ import datetime
+ from enum import Enum
+ from typing import Callable
+
+ class Aggregation(Enum):
+     VALUE = ...
+     MEAN = ...
+     COUNT = ...
+     SUM = ...
+     MAX = ...
+     MIN = ...
+
+ class Stat:
+     name: str
+     count: int
+     def __init__(
+         self,
+         name: str,
+         aggregations: list[Aggregation],
+         window_size: int,
+         max_samples: int = -1,
+     ) -> None: ...
+     def add(self, v: float) -> None: ...
+     def get(self) -> dict[Aggregation, float]: ...
+
+ class Event:
+     name: str
+     timestamp: datetime.datetime
+     data: dict[str, int | float | bool | str]
+     def __init__(
+         self,
+         name: str,
+         timestamp: datetime.datetime,
+         data: dict[str, int | float | bool | str],
+     ) -> None: ...
+
+ def log_event(e: Event) -> None: ...
+
+ class EventHandlerHandle: ...
+
+ def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ...
+ def unregister_event_handler(handle: EventHandlerHandle) -> None: ...
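
Editorial note, not part of the commit: a hedged sketch of the event API typed above, going through the public torch.monitor wrapper that re-exports these bindings; the exact surface is version-dependent.

import datetime
import torch.monitor as monitor

# Print every event that gets logged.
handle = monitor.register_event_handler(lambda e: print(e.name, e.data))
monitor.log_event(
    monitor.Event(
        name="demo.event",
        timestamp=datetime.datetime.now(),
        data={"loss": 0.25, "step": 1},
    )
)
monitor.unregister_event_handler(handle)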
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_nn.pyi ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @generated by tools/pyi/gen_pyi.py from torch/_C/_nn.pyi.in
2
+ # mypy: disable-error-code="type-arg"
3
+
4
+ from typing import List, Literal, Optional, overload, Sequence, Tuple, Union
5
+
6
+ from torch import memory_format, Tensor
7
+ from torch.types import _bool, _device, _dtype, _int, _size
8
+
9
+ # Defined in tools/autograd/templates/python_nn_functions.cpp
10
+
11
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
12
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
13
+ def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
14
+ def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
15
+ def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
16
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
17
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
18
+ def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
19
+ def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
20
+ def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
21
+ def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
22
+ def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
23
+ def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
24
+ def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
25
+ def log_sigmoid(input: Tensor) -> Tensor: ...
26
+ def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
27
+ def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ...
28
+ def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False) -> Tensor: ...
29
+ def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ...
30
+ def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
31
+
32
+ # Defined in aten/src/ATen/native/mkldnn/Linear.cpp
33
+ def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
34
+
35
+ # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
36
+ def mkldnn_reorder_conv2d_weight(
37
+ self: Tensor,
38
+ padding: List,
39
+ stride: List,
40
+ dilatation: List,
41
+ groups: int,
42
+ ) -> Tensor: ...
43
+ def mkldnn_reorder_conv3d_weight(
44
+ self: Tensor,
45
+ padding: List,
46
+ stride: List,
47
+ dilatation: List,
48
+ groups: int,
49
+ ) -> Tensor: ...
50
+
51
+ # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
52
+ def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
53
+
54
+ # Defined at tools/autograd/templates/python_nn_functions.cpp
55
+ @overload
56
+ def _parse_to(
57
+ device: _device,
58
+ dtype: _dtype,
59
+ non_blocking: _bool,
60
+ copy: _bool,
61
+ *,
62
+ memory_format: memory_format,
63
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
64
+ @overload
65
+ def _parse_to(
66
+ dtype: _dtype,
67
+ non_blocking: _bool,
68
+ copy: _bool,
69
+ *,
70
+ memory_format: memory_format,
71
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
72
+ @overload
73
+ def _parse_to(
74
+ tensor: Tensor,
75
+ non_blocking: _bool,
76
+ copy: _bool,
77
+ *,
78
+ memory_format: memory_format,
79
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
80
+
81
+ # Defined in aten/src/ATen/native/PackedSequence.cpp
82
+ def pad_sequence(
83
+ sequences: Union[List[Tensor], Tuple[Tensor, ...]],
84
+ batch_first: bool = False,
85
+ padding_value: float = 0.0,
86
+ padding_side: Union[Literal["left", "right"], str] = "right",
87
+ ) -> Tensor: ...
88
+ def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
89
+ def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...
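Note: the stubs above describe private bindings in `torch._C._nn`; they are normally reached through `torch.nn.functional`. A minimal sketch of the public entry point for `scaled_dot_product_attention` (assuming a PyTorch build that ships this operator), for example::

    import torch
    import torch.nn.functional as F

    # (batch, heads, seq_len, head_dim)
    q = torch.randn(2, 4, 8, 16)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)

    # Public wrapper that ultimately dispatches to the binding declared above.
    out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=True)
    print(out.shape)  # torch.Size([2, 4, 8, 16])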
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_nvtx.pyi ADDED
@@ -0,0 +1,7 @@
1
+ # mypy: allow-untyped-defs
2
+ # Defined in torch/csrc/cuda/shared/nvtx.cpp
3
+ def rangePushA(message: str) -> int: ...
4
+ def rangePop() -> int: ...
5
+ def rangeStartA(message: str) -> int: ...
6
+ def rangeEnd(int) -> None: ...
7
+ def markA(message: str) -> None: ...
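These NVTX bindings are normally used via `torch.cuda.nvtx`. A hedged sketch, assuming a CUDA-enabled build and a GPU (the ranges only become visible when profiling with an NVTX-aware tool such as Nsight)::

    import torch
    from torch.cuda import nvtx

    # range_push/range_pop wrap the rangePushA/rangePop bindings above.
    nvtx.range_push("matmul")
    a = torch.randn(64, 64, device="cuda")
    b = a @ a
    nvtx.range_pop()

    nvtx.mark("done")  # wraps markA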
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_onnx.pyi ADDED
@@ -0,0 +1,39 @@
1
+ # Defined in torch/csrc/onnx/init.cpp
2
+
3
+ from enum import Enum
4
+
5
+ PRODUCER_VERSION: str
6
+
7
+ class TensorProtoDataType(Enum):
8
+ UNDEFINED = ...
9
+ FLOAT = ...
10
+ UINT8 = ...
11
+ INT8 = ...
12
+ UINT16 = ...
13
+ INT16 = ...
14
+ INT32 = ...
15
+ INT64 = ...
16
+ STRING = ...
17
+ BOOL = ...
18
+ FLOAT16 = ...
19
+ DOUBLE = ...
20
+ UINT32 = ...
21
+ UINT64 = ...
22
+ COMPLEX64 = ...
23
+ COMPLEX128 = ...
24
+ BFLOAT16 = ...
25
+ FLOAT8E5M2 = ...
26
+ FLOAT8E4M3FN = ...
27
+ FLOAT8E5M2FNUZ = ...
28
+ FLOAT8E4M3FNUZ = ...
29
+
30
+ class OperatorExportTypes(Enum):
31
+ ONNX = ...
32
+ ONNX_ATEN = ...
33
+ ONNX_ATEN_FALLBACK = ...
34
+ ONNX_FALLTHROUGH = ...
35
+
36
+ class TrainingMode(Enum):
37
+ EVAL = ...
38
+ PRESERVE = ...
39
+ TRAINING = ...
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_profiler.pyi ADDED
@@ -0,0 +1,244 @@
1
+ from enum import Enum
2
+ from typing import Any, Literal
3
+ from typing_extensions import TypeAlias
4
+
5
+ from torch._C import device, dtype, layout
6
+
7
+ # defined in torch/csrc/profiler/python/init.cpp
8
+
9
+ class RecordScope(Enum):
10
+ FUNCTION = ...
11
+ BACKWARD_FUNCTION = ...
12
+ TORCHSCRIPT_FUNCTION = ...
13
+ KERNEL_FUNCTION_DTYPE = ...
14
+ CUSTOM_CLASS = ...
15
+ BUILD_FEATURE = ...
16
+ LITE_INTERPRETER = ...
17
+ USER_SCOPE = ...
18
+ STATIC_RUNTIME_OP = ...
19
+ STATIC_RUNTIME_MODEL = ...
20
+
21
+ class ProfilerState(Enum):
22
+ Disable = ...
23
+ CPU = ...
24
+ CUDA = ...
25
+ NVTX = ...
26
+ ITT = ...
27
+ KINETO = ...
28
+ KINETO_GPU_FALLBACK = ...
29
+ KINETO_PRIVATEUSE1_FALLBACK = ...
30
+ KINETO_PRIVATEUSE1 = ...
31
+
32
+ class ActiveProfilerType(Enum):
33
+ NONE = ...
34
+ LEGACY = ...
35
+ KINETO = ...
36
+ NVTX = ...
37
+ ITT = ...
38
+
39
+ class ProfilerActivity(Enum):
40
+ CPU = ...
41
+ CUDA = ...
42
+ XPU = ...
43
+ MTIA = ...
44
+ PrivateUse1 = ...
45
+
46
+ class _EventType(Enum):
47
+ TorchOp = ...
48
+ Backend = ...
49
+ Allocation = ...
50
+ OutOfMemory = ...
51
+ PyCall = ...
52
+ PyCCall = ...
53
+ Kineto = ...
54
+
55
+ class _ExperimentalConfig:
56
+ def __init__(
57
+ self,
58
+ profiler_metrics: list[str] = ...,
59
+ profiler_measure_per_kernel: bool = ...,
60
+ verbose: bool = ...,
61
+ performance_events: list[str] = ...,
62
+ enable_cuda_sync_events: bool = ...,
63
+ ) -> None: ...
64
+
65
+ class ProfilerConfig:
66
+ def __init__(
67
+ self,
68
+ state: ProfilerState,
69
+ report_input_shapes: bool,
70
+ profile_memory: bool,
71
+ with_stack: bool,
72
+ with_flops: bool,
73
+ with_modules: bool,
74
+ experimental_config: _ExperimentalConfig,
75
+ ) -> None: ...
76
+
77
+ class _ProfilerEvent:
78
+ start_tid: int
79
+ start_time_ns: int
80
+ children: list[_ProfilerEvent]
81
+
82
+ # TODO(robieta): remove in favor of `self.typed`
83
+ extra_fields: (
84
+ _ExtraFields_TorchOp
85
+ | _ExtraFields_Backend
86
+ | _ExtraFields_Allocation
87
+ | _ExtraFields_OutOfMemory
88
+ | _ExtraFields_PyCall
89
+ | _ExtraFields_PyCCall
90
+ | _ExtraFields_Kineto
91
+ )
92
+
93
+ @property
94
+ def typed(
95
+ self,
96
+ ) -> (
97
+ tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp]
98
+ | tuple[Literal[_EventType.Backend], _ExtraFields_Backend]
99
+ | tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation]
100
+ | tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory]
101
+ | tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall]
102
+ | tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall]
103
+ | tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto]
104
+ ): ...
105
+ @property
106
+ def name(self) -> str: ...
107
+ @property
108
+ def tag(self) -> _EventType: ...
109
+ @property
110
+ def id(self) -> int: ...
111
+ @property
112
+ def parent(self) -> _ProfilerEvent | None: ...
113
+ @property
114
+ def correlation_id(self) -> int: ...
115
+ @property
116
+ def end_time_ns(self) -> int: ...
117
+ @property
118
+ def duration_time_ns(self) -> int: ...
119
+
120
+ class _TensorMetadata:
121
+ impl_ptr: int | None
122
+ storage_data_ptr: int | None
123
+ id: int | None
124
+
125
+ @property
126
+ def allocation_id(self) -> int | None: ...
127
+ @property
128
+ def layout(self) -> layout: ...
129
+ @property
130
+ def device(self) -> device: ...
131
+ @property
132
+ def dtype(self) -> dtype: ...
133
+ @property
134
+ def sizes(self) -> list[int]: ...
135
+ @property
136
+ def strides(self) -> list[int]: ...
137
+
138
+ Scalar: TypeAlias = int | float | bool | complex
139
+ Input: TypeAlias = _TensorMetadata | list[_TensorMetadata] | Scalar | None
140
+
141
+ class _ExtraFields_TorchOp:
142
+ name: str
143
+ sequence_number: int
144
+ allow_tf32_cublas: bool
145
+
146
+ @property
147
+ def inputs(self) -> list[Input]: ...
148
+ @property
149
+ def scope(self) -> RecordScope: ...
150
+
151
+ class _ExtraFields_Backend: ...
152
+
153
+ class _ExtraFields_Allocation:
154
+ ptr: int
155
+ id: int | None
156
+ alloc_size: int
157
+ total_allocated: int
158
+ total_reserved: int
159
+
160
+ @property
161
+ def allocation_id(self) -> int | None: ...
162
+ @property
163
+ def device(self) -> device: ...
164
+
165
+ class _ExtraFields_OutOfMemory: ...
166
+
167
+ class _PyFrameState:
168
+ line_number: int
169
+ function_name: str
170
+
171
+ @property
172
+ def file_name(self) -> str: ...
173
+
174
+ class _NNModuleInfo:
175
+ @property
176
+ def self_ptr(self) -> int: ...
177
+ @property
178
+ def cls_ptr(self) -> int: ...
179
+ @property
180
+ def cls_name(self) -> str: ...
181
+ @property
182
+ def parameters(
183
+ self,
184
+ ) -> list[tuple[str, _TensorMetadata, _TensorMetadata | None]]: ...
185
+
186
+ class _OptimizerInfo:
187
+ @property
188
+ def parameters(
189
+ self,
190
+ ) -> list[
191
+ tuple[
192
+ # Parameter
193
+ _TensorMetadata,
194
+ #
195
+ # Gradient (if present during optimizer.step())
196
+ _TensorMetadata | None,
197
+ #
198
+ # Optimizer state for Parameter as (name, tensor) pairs
199
+ list[tuple[str, _TensorMetadata]],
200
+ ]
201
+ ]: ...
202
+
203
+ class _ExtraFields_PyCCall:
204
+ @property
205
+ def caller(self) -> _PyFrameState: ...
206
+
207
+ class _ExtraFields_PyCall:
208
+ @property
209
+ def callsite(self) -> _PyFrameState: ...
210
+ @property
211
+ def caller(self) -> _PyFrameState: ...
212
+ @property
213
+ def module(self) -> _NNModuleInfo | None: ...
214
+ @property
215
+ def optimizer(self) -> _OptimizerInfo | None: ...
216
+
217
+ class _ExtraFields_Kineto: ...
218
+
219
+ def _add_execution_trace_observer(output_file_path: str) -> bool: ...
220
+ def _remove_execution_trace_observer() -> None: ...
221
+ def _enable_execution_trace_observer() -> None: ...
222
+ def _disable_execution_trace_observer() -> None: ...
223
+ def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ...
224
+ def _set_fwd_bwd_enabled_val(val: bool) -> None: ...
225
+ def _set_cuda_sync_enabled_val(val: bool) -> None: ...
226
+
227
+ class CapturedTraceback: ...
228
+
229
+ def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ...
230
+
231
+ # Each returned dict has the keys: name, filename, line
232
+ def symbolize_tracebacks(
233
+ to_symbolize: list[CapturedTraceback],
234
+ ) -> list[list[dict[str, str]]]: ...
235
+
236
+ class _RecordFunctionFast:
237
+ def __init__(
238
+ self,
239
+ name: str,
240
+ input_values: list | tuple | None = None,
241
+ keyword_values: dict | None = None,
242
+ ) -> None: ...
243
+ def __enter__(self) -> None: ...
244
+ def __exit__(self, *args: Any) -> None: ...
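These are the low-level profiler types; the supported entry point is `torch.profiler`. A small sketch that exercises `ProfilerActivity` and `record_function` (CPU only, no assumptions beyond a standard PyTorch install)::

    import torch
    from torch.profiler import ProfilerActivity, profile, record_function

    with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
        with record_function("my_block"):
            x = torch.randn(128, 128)
            y = x @ x

    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))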
infer_4_30_0/lib/python3.10/site-packages/torch/_C/_verbose.pyi ADDED
@@ -0,0 +1,3 @@
1
+ # Defined in torch/csrc/utils/verbose.cpp
2
+ def mkl_set_verbose(enable: int) -> int: ...
3
+ def mkldnn_set_verbose(level: int) -> int: ...
infer_4_30_0/lib/python3.10/site-packages/torch/_awaits/__init__.py ADDED
@@ -0,0 +1,53 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Generic, TypeVar
4
+
5
+ import torch
6
+
7
+ __all__ = ['Await']
8
+
9
+ W = TypeVar("W")
10
+
11
+ class _PyAwaitMeta(type(torch._C._Await), type(Generic)): # type: ignore[misc, no-redef]
12
+ pass
13
+
14
+ class _Await(torch._C._Await, Generic[W], metaclass=_PyAwaitMeta):
15
+ r"""
16
+ Wrapper around a ``torch._C.Await`` which encapsulates delayed execution
17
+ of a callable. All manipulations happen with functions ``torch.jit._awaitable``,
18
+ ``torch.jit._awaitable_wait``, ``torch.jit._awaitable_nowait``.
19
+
20
+ Torch scriptable manipulations:
21
+ ``torch.jit._awaitable(func, *args)``
22
+ Creates an ``Await[W]`` object, where ``W`` is the return type of ``func``.
23
+
24
+ ``torch.jit._awaitable_wait(Await[W])``
25
+ Returns the result of the function specified at ``_awaitable``, called with the given arguments.
26
+
27
+ Returns:
28
+ The result of type ``W`` of the function call. The result is owned by ``Await[W]``
29
+ and returned on all following ``_awaitable_wait`` calls.
30
+
31
+
32
+
33
+ ``torch.jit._awaitable_nowait(W)``
34
+ Returns:
35
+ Trivial ``Await[W]`` with specified result.
36
+
37
+
38
+ Only in eager mode:
39
+ ``fn() -> Callable[Tuple[Any], W]``
40
+ Returns:
41
+ The Python function ``func`` that was specified at ``_awaitable``.
42
+
43
+ ``args() -> Tuple[Any]``
44
+ Returns:
45
+ The Python args that were specified at ``_awaitable``.
46
+
47
+ ``is_nowait() -> _bool``
48
+ Returns:
49
+ ``True`` if this object was created via ``_awaitable_nowait`` call (trivial `Await[W]`).
50
+
51
+ In eager mode, ``Await[W]`` can be used as ``W``, i.e. attributes of ``W`` can be accessed on ``Await[W]``;
52
+ an ``_awaitable_wait()`` call is added transparently.
53
+ """
infer_4_30_0/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.05 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__init__.py ADDED
@@ -0,0 +1,172 @@
1
+ r"""
2
+ The ``distributions`` package contains parameterizable probability distributions
3
+ and sampling functions. This allows the construction of stochastic computation
4
+ graphs and stochastic gradient estimators for optimization. This package
5
+ generally follows the design of the `TensorFlow Distributions`_ package.
6
+
7
+ .. _`TensorFlow Distributions`:
8
+ https://arxiv.org/abs/1711.10604
9
+
10
+ It is not possible to directly backpropagate through random samples. However,
11
+ there are two main methods for creating surrogate functions that can be
12
+ backpropagated through. These are the score function estimator/likelihood ratio
13
+ estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly
14
+ seen as the basis for policy gradient methods in reinforcement learning, and the
15
+ pathwise derivative estimator is commonly seen in the reparameterization trick
16
+ in variational autoencoders. Whilst the score function only requires the value
17
+ of samples :math:`f(x)`, the pathwise derivative requires the derivative
18
+ :math:`f'(x)`. The next sections discuss these two in a reinforcement learning
19
+ example. For more details see
20
+ `Gradient Estimation Using Stochastic Computation Graphs`_ .
21
+
22
+ .. _`Gradient Estimation Using Stochastic Computation Graphs`:
23
+ https://arxiv.org/abs/1506.05254
24
+
25
+ Score function
26
+ ^^^^^^^^^^^^^^
27
+
28
+ When the probability density function is differentiable with respect to its
29
+ parameters, we only need :meth:`~torch.distributions.Distribution.sample` and
30
+ :meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE:
31
+
32
+ .. math::
33
+
34
+ \Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta}
35
+
36
+ where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate,
37
+ :math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of
38
+ taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`.
39
+
40
+ In practice we would sample an action from the output of a network, apply this
41
+ action in an environment, and then use ``log_prob`` to construct an equivalent
42
+ loss function. Note that we use a negative because optimizers use gradient
43
+ descent, whilst the rule above assumes gradient ascent. With a categorical
44
+ policy, the code for implementing REINFORCE would be as follows::
45
+
46
+ probs = policy_network(state)
47
+ # Note that this is equivalent to what used to be called multinomial
48
+ m = Categorical(probs)
49
+ action = m.sample()
50
+ next_state, reward = env.step(action)
51
+ loss = -m.log_prob(action) * reward
52
+ loss.backward()
53
+
54
+ Pathwise derivative
55
+ ^^^^^^^^^^^^^^^^^^^
56
+
57
+ The other way to implement these stochastic/policy gradients would be to use the
58
+ reparameterization trick from the
59
+ :meth:`~torch.distributions.Distribution.rsample` method, where the
60
+ parameterized random variable can be constructed via a parameterized
61
+ deterministic function of a parameter-free random variable. The reparameterized
62
+ sample therefore becomes differentiable. The code for implementing the pathwise
63
+ derivative would be as follows::
64
+
65
+ params = policy_network(state)
66
+ m = Normal(*params)
67
+ # Any distribution with .has_rsample == True could work based on the application
68
+ action = m.rsample()
69
+ next_state, reward = env.step(action) # Assuming that reward is differentiable
70
+ loss = -reward
71
+ loss.backward()
72
+ """
73
+
74
+ from . import transforms
75
+ from .bernoulli import Bernoulli
76
+ from .beta import Beta
77
+ from .binomial import Binomial
78
+ from .categorical import Categorical
79
+ from .cauchy import Cauchy
80
+ from .chi2 import Chi2
81
+ from .constraint_registry import biject_to, transform_to
82
+ from .continuous_bernoulli import ContinuousBernoulli
83
+ from .dirichlet import Dirichlet
84
+ from .distribution import Distribution
85
+ from .exp_family import ExponentialFamily
86
+ from .exponential import Exponential
87
+ from .fishersnedecor import FisherSnedecor
88
+ from .gamma import Gamma
89
+ from .geometric import Geometric
90
+ from .gumbel import Gumbel
91
+ from .half_cauchy import HalfCauchy
92
+ from .half_normal import HalfNormal
93
+ from .independent import Independent
94
+ from .inverse_gamma import InverseGamma
95
+ from .kl import _add_kl_info, kl_divergence, register_kl
96
+ from .kumaraswamy import Kumaraswamy
97
+ from .laplace import Laplace
98
+ from .lkj_cholesky import LKJCholesky
99
+ from .log_normal import LogNormal
100
+ from .logistic_normal import LogisticNormal
101
+ from .lowrank_multivariate_normal import LowRankMultivariateNormal
102
+ from .mixture_same_family import MixtureSameFamily
103
+ from .multinomial import Multinomial
104
+ from .multivariate_normal import MultivariateNormal
105
+ from .negative_binomial import NegativeBinomial
106
+ from .normal import Normal
107
+ from .one_hot_categorical import OneHotCategorical, OneHotCategoricalStraightThrough
108
+ from .pareto import Pareto
109
+ from .poisson import Poisson
110
+ from .relaxed_bernoulli import RelaxedBernoulli
111
+ from .relaxed_categorical import RelaxedOneHotCategorical
112
+ from .studentT import StudentT
113
+ from .transformed_distribution import TransformedDistribution
114
+ from .transforms import * # noqa: F403
115
+ from .uniform import Uniform
116
+ from .von_mises import VonMises
117
+ from .weibull import Weibull
118
+ from .wishart import Wishart
119
+
120
+
121
+ _add_kl_info()
122
+ del _add_kl_info
123
+
124
+ __all__ = [
125
+ "Bernoulli",
126
+ "Beta",
127
+ "Binomial",
128
+ "Categorical",
129
+ "Cauchy",
130
+ "Chi2",
131
+ "ContinuousBernoulli",
132
+ "Dirichlet",
133
+ "Distribution",
134
+ "Exponential",
135
+ "ExponentialFamily",
136
+ "FisherSnedecor",
137
+ "Gamma",
138
+ "Geometric",
139
+ "Gumbel",
140
+ "HalfCauchy",
141
+ "HalfNormal",
142
+ "Independent",
143
+ "InverseGamma",
144
+ "Kumaraswamy",
145
+ "LKJCholesky",
146
+ "Laplace",
147
+ "LogNormal",
148
+ "LogisticNormal",
149
+ "LowRankMultivariateNormal",
150
+ "MixtureSameFamily",
151
+ "Multinomial",
152
+ "MultivariateNormal",
153
+ "NegativeBinomial",
154
+ "Normal",
155
+ "OneHotCategorical",
156
+ "OneHotCategoricalStraightThrough",
157
+ "Pareto",
158
+ "RelaxedBernoulli",
159
+ "RelaxedOneHotCategorical",
160
+ "StudentT",
161
+ "Poisson",
162
+ "Uniform",
163
+ "VonMises",
164
+ "Weibull",
165
+ "Wishart",
166
+ "TransformedDistribution",
167
+ "biject_to",
168
+ "kl_divergence",
169
+ "register_kl",
170
+ "transform_to",
171
+ ]
172
+ __all__.extend(transforms.__all__)
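A runnable condensation of the two gradient estimators described in the module docstring above; the scalar reward is a stand-in for an environment step::

    import torch
    from torch.distributions import Categorical, Normal

    # Score-function (REINFORCE): differentiate through log_prob of a sample.
    logits = torch.zeros(3, requires_grad=True)
    m = Categorical(logits=logits)
    action = m.sample()
    reward = (action == 2).float()           # toy reward
    (-m.log_prob(action) * reward).backward()

    # Pathwise estimator: differentiate through rsample().
    loc = torch.tensor(0.0, requires_grad=True)
    scale = torch.tensor(1.0, requires_grad=True)
    sample = Normal(loc, scale).rsample()
    (-sample).backward()                     # loc.grad == -1 exactly

    print(logits.grad, loc.grad, scale.grad)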
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/dirichlet.cpython-310.pyc ADDED
Binary file (4.66 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc ADDED
Binary file (4.77 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/mixture_same_family.cpython-310.pyc ADDED
Binary file (7.22 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/__pycache__/von_mises.cpython-310.pyc ADDED
Binary file (5.94 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/bernoulli.py ADDED
@@ -0,0 +1,132 @@
1
+ # mypy: allow-untyped-defs
2
+ from numbers import Number
3
+
4
+ import torch
5
+ from torch import nan
6
+ from torch.distributions import constraints
7
+ from torch.distributions.exp_family import ExponentialFamily
8
+ from torch.distributions.utils import (
9
+ broadcast_all,
10
+ lazy_property,
11
+ logits_to_probs,
12
+ probs_to_logits,
13
+ )
14
+ from torch.nn.functional import binary_cross_entropy_with_logits
15
+
16
+
17
+ __all__ = ["Bernoulli"]
18
+
19
+
20
+ class Bernoulli(ExponentialFamily):
21
+ r"""
22
+ Creates a Bernoulli distribution parameterized by :attr:`probs`
23
+ or :attr:`logits` (but not both).
24
+
25
+ Samples are binary (0 or 1). They take the value `1` with probability `p`
26
+ and `0` with probability `1 - p`.
27
+
28
+ Example::
29
+
30
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
31
+ >>> m = Bernoulli(torch.tensor([0.3]))
32
+ >>> m.sample() # 30% chance 1; 70% chance 0
33
+ tensor([ 0.])
34
+
35
+ Args:
36
+ probs (Number, Tensor): the probability of sampling `1`
37
+ logits (Number, Tensor): the log-odds of sampling `1`
38
+ """
39
+ arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
40
+ support = constraints.boolean
41
+ has_enumerate_support = True
42
+ _mean_carrier_measure = 0
43
+
44
+ def __init__(self, probs=None, logits=None, validate_args=None):
45
+ if (probs is None) == (logits is None):
46
+ raise ValueError(
47
+ "Either `probs` or `logits` must be specified, but not both."
48
+ )
49
+ if probs is not None:
50
+ is_scalar = isinstance(probs, Number)
51
+ (self.probs,) = broadcast_all(probs)
52
+ else:
53
+ is_scalar = isinstance(logits, Number)
54
+ (self.logits,) = broadcast_all(logits)
55
+ self._param = self.probs if probs is not None else self.logits
56
+ if is_scalar:
57
+ batch_shape = torch.Size()
58
+ else:
59
+ batch_shape = self._param.size()
60
+ super().__init__(batch_shape, validate_args=validate_args)
61
+
62
+ def expand(self, batch_shape, _instance=None):
63
+ new = self._get_checked_instance(Bernoulli, _instance)
64
+ batch_shape = torch.Size(batch_shape)
65
+ if "probs" in self.__dict__:
66
+ new.probs = self.probs.expand(batch_shape)
67
+ new._param = new.probs
68
+ if "logits" in self.__dict__:
69
+ new.logits = self.logits.expand(batch_shape)
70
+ new._param = new.logits
71
+ super(Bernoulli, new).__init__(batch_shape, validate_args=False)
72
+ new._validate_args = self._validate_args
73
+ return new
74
+
75
+ def _new(self, *args, **kwargs):
76
+ return self._param.new(*args, **kwargs)
77
+
78
+ @property
79
+ def mean(self):
80
+ return self.probs
81
+
82
+ @property
83
+ def mode(self):
84
+ mode = (self.probs >= 0.5).to(self.probs)
85
+ mode[self.probs == 0.5] = nan
86
+ return mode
87
+
88
+ @property
89
+ def variance(self):
90
+ return self.probs * (1 - self.probs)
91
+
92
+ @lazy_property
93
+ def logits(self):
94
+ return probs_to_logits(self.probs, is_binary=True)
95
+
96
+ @lazy_property
97
+ def probs(self):
98
+ return logits_to_probs(self.logits, is_binary=True)
99
+
100
+ @property
101
+ def param_shape(self):
102
+ return self._param.size()
103
+
104
+ def sample(self, sample_shape=torch.Size()):
105
+ shape = self._extended_shape(sample_shape)
106
+ with torch.no_grad():
107
+ return torch.bernoulli(self.probs.expand(shape))
108
+
109
+ def log_prob(self, value):
110
+ if self._validate_args:
111
+ self._validate_sample(value)
112
+ logits, value = broadcast_all(self.logits, value)
113
+ return -binary_cross_entropy_with_logits(logits, value, reduction="none")
114
+
115
+ def entropy(self):
116
+ return binary_cross_entropy_with_logits(
117
+ self.logits, self.probs, reduction="none"
118
+ )
119
+
120
+ def enumerate_support(self, expand=True):
121
+ values = torch.arange(2, dtype=self._param.dtype, device=self._param.device)
122
+ values = values.view((-1,) + (1,) * len(self._batch_shape))
123
+ if expand:
124
+ values = values.expand((-1,) + self._batch_shape)
125
+ return values
126
+
127
+ @property
128
+ def _natural_params(self):
129
+ return (torch.logit(self.probs),)
130
+
131
+ def _log_normalizer(self, x):
132
+ return torch.log1p(torch.exp(x))
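A short usage sketch covering the pieces not shown in the class docstring (batched parameters, `enumerate_support`, `entropy`)::

    import torch
    from torch.distributions import Bernoulli

    m = Bernoulli(probs=torch.tensor([0.3, 0.9]))
    print(m.sample((4,)).shape)          # torch.Size([4, 2]), values in {0., 1.}
    print(m.log_prob(torch.ones(2)))     # log([0.3, 0.9])
    print(m.entropy())                   # elementwise entropy in nats
    print(m.enumerate_support())         # tensor([[0., 0.], [1., 1.]])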
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/binomial.py ADDED
@@ -0,0 +1,167 @@
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.distributions import constraints
4
+ from torch.distributions.distribution import Distribution
5
+ from torch.distributions.utils import (
6
+ broadcast_all,
7
+ lazy_property,
8
+ logits_to_probs,
9
+ probs_to_logits,
10
+ )
11
+
12
+
13
+ __all__ = ["Binomial"]
14
+
15
+
16
+ def _clamp_by_zero(x):
17
+ # works like clamp(x, min=0), but the gradient at 0 is 0.5
18
+ return (x.clamp(min=0) + x - x.clamp(max=0)) / 2
19
+
20
+
21
+ class Binomial(Distribution):
22
+ r"""
23
+ Creates a Binomial distribution parameterized by :attr:`total_count` and
24
+ either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
25
+ broadcastable with :attr:`probs`/:attr:`logits`.
26
+
27
+ Example::
28
+
29
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
30
+ >>> m = Binomial(100, torch.tensor([0.0, 0.2, 0.8, 1.0]))
31
+ >>> x = m.sample()
32
+ tensor([ 0., 22., 71., 100.])
33
+
34
+ >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
35
+ >>> x = m.sample()
36
+ tensor([[ 4., 5.],
37
+ [ 7., 6.]])
38
+
39
+ Args:
40
+ total_count (int or Tensor): number of Bernoulli trials
41
+ probs (Tensor): Event probabilities
42
+ logits (Tensor): Event log-odds
43
+ """
44
+ arg_constraints = {
45
+ "total_count": constraints.nonnegative_integer,
46
+ "probs": constraints.unit_interval,
47
+ "logits": constraints.real,
48
+ }
49
+ has_enumerate_support = True
50
+
51
+ def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
52
+ if (probs is None) == (logits is None):
53
+ raise ValueError(
54
+ "Either `probs` or `logits` must be specified, but not both."
55
+ )
56
+ if probs is not None:
57
+ (
58
+ self.total_count,
59
+ self.probs,
60
+ ) = broadcast_all(total_count, probs)
61
+ self.total_count = self.total_count.type_as(self.probs)
62
+ else:
63
+ (
64
+ self.total_count,
65
+ self.logits,
66
+ ) = broadcast_all(total_count, logits)
67
+ self.total_count = self.total_count.type_as(self.logits)
68
+
69
+ self._param = self.probs if probs is not None else self.logits
70
+ batch_shape = self._param.size()
71
+ super().__init__(batch_shape, validate_args=validate_args)
72
+
73
+ def expand(self, batch_shape, _instance=None):
74
+ new = self._get_checked_instance(Binomial, _instance)
75
+ batch_shape = torch.Size(batch_shape)
76
+ new.total_count = self.total_count.expand(batch_shape)
77
+ if "probs" in self.__dict__:
78
+ new.probs = self.probs.expand(batch_shape)
79
+ new._param = new.probs
80
+ if "logits" in self.__dict__:
81
+ new.logits = self.logits.expand(batch_shape)
82
+ new._param = new.logits
83
+ super(Binomial, new).__init__(batch_shape, validate_args=False)
84
+ new._validate_args = self._validate_args
85
+ return new
86
+
87
+ def _new(self, *args, **kwargs):
88
+ return self._param.new(*args, **kwargs)
89
+
90
+ @constraints.dependent_property(is_discrete=True, event_dim=0)
91
+ def support(self):
92
+ return constraints.integer_interval(0, self.total_count)
93
+
94
+ @property
95
+ def mean(self):
96
+ return self.total_count * self.probs
97
+
98
+ @property
99
+ def mode(self):
100
+ return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count)
101
+
102
+ @property
103
+ def variance(self):
104
+ return self.total_count * self.probs * (1 - self.probs)
105
+
106
+ @lazy_property
107
+ def logits(self):
108
+ return probs_to_logits(self.probs, is_binary=True)
109
+
110
+ @lazy_property
111
+ def probs(self):
112
+ return logits_to_probs(self.logits, is_binary=True)
113
+
114
+ @property
115
+ def param_shape(self):
116
+ return self._param.size()
117
+
118
+ def sample(self, sample_shape=torch.Size()):
119
+ shape = self._extended_shape(sample_shape)
120
+ with torch.no_grad():
121
+ return torch.binomial(
122
+ self.total_count.expand(shape), self.probs.expand(shape)
123
+ )
124
+
125
+ def log_prob(self, value):
126
+ if self._validate_args:
127
+ self._validate_sample(value)
128
+ log_factorial_n = torch.lgamma(self.total_count + 1)
129
+ log_factorial_k = torch.lgamma(value + 1)
130
+ log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
131
+ # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
132
+ # (case logit < 0) = k * logit - n * log1p(e^logit)
133
+ # (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
134
+ # = k * logit - n * logit - n * log1p(e^-logit)
135
+ # (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
136
+ normalize_term = (
137
+ self.total_count * _clamp_by_zero(self.logits)
138
+ + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
139
+ - log_factorial_n
140
+ )
141
+ return (
142
+ value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
143
+ )
144
+
145
+ def entropy(self):
146
+ total_count = int(self.total_count.max())
147
+ if not self.total_count.min() == total_count:
148
+ raise NotImplementedError(
149
+ "Inhomogeneous total count not supported by `entropy`."
150
+ )
151
+
152
+ log_prob = self.log_prob(self.enumerate_support(False))
153
+ return -(torch.exp(log_prob) * log_prob).sum(0)
154
+
155
+ def enumerate_support(self, expand=True):
156
+ total_count = int(self.total_count.max())
157
+ if not self.total_count.min() == total_count:
158
+ raise NotImplementedError(
159
+ "Inhomogeneous total count not supported by `enumerate_support`."
160
+ )
161
+ values = torch.arange(
162
+ 1 + total_count, dtype=self._param.dtype, device=self._param.device
163
+ )
164
+ values = values.view((-1,) + (1,) * len(self._batch_shape))
165
+ if expand:
166
+ values = values.expand((-1,) + self._batch_shape)
167
+ return values
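The `log_prob` above is written in terms of logits for numerical stability (see the in-line derivation); a quick check against the textbook pmf, using only the standard library for the reference value::

    import math
    import torch
    from torch.distributions import Binomial

    n, p, k = 10, 0.3, 4
    d = Binomial(total_count=n, probs=torch.tensor(p))
    lp = d.log_prob(torch.tensor(float(k)))

    # log C(n, k) + k*log(p) + (n-k)*log(1-p)
    ref = math.log(math.comb(n, k)) + k * math.log(p) + (n - k) * math.log(1 - p)
    print(lp.item(), ref)  # agree to float precision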
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/cauchy.py ADDED
@@ -0,0 +1,93 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ from numbers import Number
4
+
5
+ import torch
6
+ from torch import inf, nan
7
+ from torch.distributions import constraints
8
+ from torch.distributions.distribution import Distribution
9
+ from torch.distributions.utils import broadcast_all
10
+ from torch.types import _size
11
+
12
+
13
+ __all__ = ["Cauchy"]
14
+
15
+
16
+ class Cauchy(Distribution):
17
+ r"""
18
+ Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of
19
+ independent normally distributed random variables with means `0` follows a
20
+ Cauchy distribution.
21
+
22
+ Example::
23
+
24
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
25
+ >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
26
+ >>> m.sample() # sample from a Cauchy distribution with loc=0 and scale=1
27
+ tensor([ 2.3214])
28
+
29
+ Args:
30
+ loc (float or Tensor): mode or median of the distribution.
31
+ scale (float or Tensor): half width at half maximum.
32
+ """
33
+ arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
34
+ support = constraints.real
35
+ has_rsample = True
36
+
37
+ def __init__(self, loc, scale, validate_args=None):
38
+ self.loc, self.scale = broadcast_all(loc, scale)
39
+ if isinstance(loc, Number) and isinstance(scale, Number):
40
+ batch_shape = torch.Size()
41
+ else:
42
+ batch_shape = self.loc.size()
43
+ super().__init__(batch_shape, validate_args=validate_args)
44
+
45
+ def expand(self, batch_shape, _instance=None):
46
+ new = self._get_checked_instance(Cauchy, _instance)
47
+ batch_shape = torch.Size(batch_shape)
48
+ new.loc = self.loc.expand(batch_shape)
49
+ new.scale = self.scale.expand(batch_shape)
50
+ super(Cauchy, new).__init__(batch_shape, validate_args=False)
51
+ new._validate_args = self._validate_args
52
+ return new
53
+
54
+ @property
55
+ def mean(self):
56
+ return torch.full(
57
+ self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device
58
+ )
59
+
60
+ @property
61
+ def mode(self):
62
+ return self.loc
63
+
64
+ @property
65
+ def variance(self):
66
+ return torch.full(
67
+ self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device
68
+ )
69
+
70
+ def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
71
+ shape = self._extended_shape(sample_shape)
72
+ eps = self.loc.new(shape).cauchy_()
73
+ return self.loc + eps * self.scale
74
+
75
+ def log_prob(self, value):
76
+ if self._validate_args:
77
+ self._validate_sample(value)
78
+ return (
79
+ -math.log(math.pi)
80
+ - self.scale.log()
81
+ - (((value - self.loc) / self.scale) ** 2).log1p()
82
+ )
83
+
84
+ def cdf(self, value):
85
+ if self._validate_args:
86
+ self._validate_sample(value)
87
+ return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5
88
+
89
+ def icdf(self, value):
90
+ return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc
91
+
92
+ def entropy(self):
93
+ return math.log(4 * math.pi) + self.scale.log()
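A quick sketch exercising the closed-form `cdf`/`icdf` pair defined above (they should invert each other up to floating-point error), plus the undefined moments::

    import torch
    from torch.distributions import Cauchy

    d = Cauchy(loc=torch.tensor(0.0), scale=torch.tensor(2.0))
    x = torch.linspace(-5.0, 5.0, 11)
    print(torch.allclose(d.icdf(d.cdf(x)), x, atol=1e-4))  # True
    print(d.mean, d.variance)  # nan and inf: the Cauchy has no mean or variance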
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/chi2.py ADDED
@@ -0,0 +1,35 @@
1
+ # mypy: allow-untyped-defs
2
+ from torch.distributions import constraints
3
+ from torch.distributions.gamma import Gamma
4
+
5
+
6
+ __all__ = ["Chi2"]
7
+
8
+
9
+ class Chi2(Gamma):
10
+ r"""
11
+ Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`.
12
+ This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
13
+
14
+ Example::
15
+
16
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
17
+ >>> m = Chi2(torch.tensor([1.0]))
18
+ >>> m.sample() # Chi2 distributed with shape df=1
19
+ tensor([ 0.1046])
20
+
21
+ Args:
22
+ df (float or Tensor): shape parameter of the distribution
23
+ """
24
+ arg_constraints = {"df": constraints.positive}
25
+
26
+ def __init__(self, df, validate_args=None):
27
+ super().__init__(0.5 * df, 0.5, validate_args=validate_args)
28
+
29
+ def expand(self, batch_shape, _instance=None):
30
+ new = self._get_checked_instance(Chi2, _instance)
31
+ return super().expand(batch_shape, new)
32
+
33
+ @property
34
+ def df(self):
35
+ return self.concentration * 2
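A numerical check of the claim in the docstring that `Chi2(df)` is the same distribution as `Gamma(0.5 * df, 0.5)`::

    import torch
    from torch.distributions import Chi2, Gamma

    df = torch.tensor(3.0)
    x = torch.tensor([0.5, 1.0, 4.0])
    gamma = Gamma(concentration=0.5 * df, rate=torch.tensor(0.5))
    print(torch.allclose(Chi2(df).log_prob(x), gamma.log_prob(x)))  # True
    print(Chi2(df).df)                                              # tensor(3.)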
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/constraint_registry.py ADDED
@@ -0,0 +1,294 @@
1
+ # mypy: allow-untyped-defs
2
+ r"""
3
+ PyTorch provides two global :class:`ConstraintRegistry` objects that link
4
+ :class:`~torch.distributions.constraints.Constraint` objects to
5
+ :class:`~torch.distributions.transforms.Transform` objects. These objects both
6
+ input constraints and return transforms, but they have different guarantees on
7
+ bijectivity.
8
+
9
+ 1. ``biject_to(constraint)`` looks up a bijective
10
+ :class:`~torch.distributions.transforms.Transform` from ``constraints.real``
11
+ to the given ``constraint``. The returned transform is guaranteed to have
12
+ ``.bijective = True`` and should implement ``.log_abs_det_jacobian()``.
13
+ 2. ``transform_to(constraint)`` looks up a not-necessarily bijective
14
+ :class:`~torch.distributions.transforms.Transform` from ``constraints.real``
15
+ to the given ``constraint``. The returned transform is not guaranteed to
16
+ implement ``.log_abs_det_jacobian()``.
17
+
18
+ The ``transform_to()`` registry is useful for performing unconstrained
19
+ optimization on constrained parameters of probability distributions, which are
20
+ indicated by each distribution's ``.arg_constraints`` dict. These transforms often
21
+ overparameterize a space in order to avoid rotation; they are thus more
22
+ suitable for coordinate-wise optimization algorithms like Adam::
23
+
24
+ loc = torch.zeros(100, requires_grad=True)
25
+ unconstrained = torch.zeros(100, requires_grad=True)
26
+ scale = transform_to(Normal.arg_constraints['scale'])(unconstrained)
27
+ loss = -Normal(loc, scale).log_prob(data).sum()
28
+
29
+ The ``biject_to()`` registry is useful for Hamiltonian Monte Carlo, where
30
+ samples from a probability distribution with constrained ``.support`` are
31
+ propagated in an unconstrained space, and algorithms are typically rotation
32
+ invariant.::
33
+
34
+ dist = Exponential(rate)
35
+ unconstrained = torch.zeros(100, requires_grad=True)
36
+ sample = biject_to(dist.support)(unconstrained)
37
+ potential_energy = -dist.log_prob(sample).sum()
38
+
39
+ .. note::
40
+
41
+ An example where ``transform_to`` and ``biject_to`` differ is
42
+ ``constraints.simplex``: ``transform_to(constraints.simplex)`` returns a
43
+ :class:`~torch.distributions.transforms.SoftmaxTransform` that simply
44
+ exponentiates and normalizes its inputs; this is a cheap and mostly
45
+ coordinate-wise operation appropriate for algorithms like SVI. In
46
+ contrast, ``biject_to(constraints.simplex)`` returns a
47
+ :class:`~torch.distributions.transforms.StickBreakingTransform` that
48
+ bijects its input down to a one-fewer-dimensional space; this is a more
49
+ expensive, less numerically stable transform, but it is needed for algorithms
50
+ like HMC.
51
+
52
+ The ``biject_to`` and ``transform_to`` objects can be extended by user-defined
53
+ constraints and transforms using their ``.register()`` method either as a
54
+ function on singleton constraints::
55
+
56
+ transform_to.register(my_constraint, my_transform)
57
+
58
+ or as a decorator on parameterized constraints::
59
+
60
+ @transform_to.register(MyConstraintClass)
61
+ def my_factory(constraint):
62
+ assert isinstance(constraint, MyConstraintClass)
63
+ return MyTransform(constraint.param1, constraint.param2)
64
+
65
+ You can create your own registry by creating a new :class:`ConstraintRegistry`
66
+ object.
67
+ """
68
+
69
+ import numbers
70
+
71
+ from torch.distributions import constraints, transforms
72
+
73
+
74
+ __all__ = [
75
+ "ConstraintRegistry",
76
+ "biject_to",
77
+ "transform_to",
78
+ ]
79
+
80
+
81
+ class ConstraintRegistry:
82
+ """
83
+ Registry to link constraints to transforms.
84
+ """
85
+
86
+ def __init__(self):
87
+ self._registry = {}
88
+ super().__init__()
89
+
90
+ def register(self, constraint, factory=None):
91
+ """
92
+ Registers a :class:`~torch.distributions.constraints.Constraint`
93
+ subclass in this registry. Usage::
94
+
95
+ @my_registry.register(MyConstraintClass)
96
+ def construct_transform(constraint):
97
+ assert isinstance(constraint, MyConstraint)
98
+ return MyTransform(constraint.arg_constraints)
99
+
100
+ Args:
101
+ constraint (subclass of :class:`~torch.distributions.constraints.Constraint`):
102
+ A subclass of :class:`~torch.distributions.constraints.Constraint`, or
103
+ a singleton object of the desired class.
104
+ factory (Callable): A callable that inputs a constraint object and returns
105
+ a :class:`~torch.distributions.transforms.Transform` object.
106
+ """
107
+ # Support use as decorator.
108
+ if factory is None:
109
+ return lambda factory: self.register(constraint, factory)
110
+
111
+ # Support calling on singleton instances.
112
+ if isinstance(constraint, constraints.Constraint):
113
+ constraint = type(constraint)
114
+
115
+ if not isinstance(constraint, type) or not issubclass(
116
+ constraint, constraints.Constraint
117
+ ):
118
+ raise TypeError(
119
+ f"Expected constraint to be either a Constraint subclass or instance, but got {constraint}"
120
+ )
121
+
122
+ self._registry[constraint] = factory
123
+ return factory
124
+
125
+ def __call__(self, constraint):
126
+ """
127
+ Looks up a transform to constrained space, given a constraint object.
128
+ Usage::
129
+
130
+ constraint = Normal.arg_constraints['scale']
131
+ scale = transform_to(constraint)(torch.zeros(1)) # constrained
132
+ u = transform_to(constraint).inv(scale) # unconstrained
133
+
134
+ Args:
135
+ constraint (:class:`~torch.distributions.constraints.Constraint`):
136
+ A constraint object.
137
+
138
+ Returns:
139
+ A :class:`~torch.distributions.transforms.Transform` object.
140
+
141
+ Raises:
142
+ `NotImplementedError` if no transform has been registered.
143
+ """
144
+ # Look up by Constraint subclass.
145
+ try:
146
+ factory = self._registry[type(constraint)]
147
+ except KeyError:
148
+ raise NotImplementedError(
149
+ f"Cannot transform {type(constraint).__name__} constraints"
150
+ ) from None
151
+ return factory(constraint)
152
+
153
+
154
+ biject_to = ConstraintRegistry()
155
+ transform_to = ConstraintRegistry()
156
+
157
+
158
+ ################################################################################
159
+ # Registration Table
160
+ ################################################################################
161
+
162
+
163
+ @biject_to.register(constraints.real)
164
+ @transform_to.register(constraints.real)
165
+ def _transform_to_real(constraint):
166
+ return transforms.identity_transform
167
+
168
+
169
+ @biject_to.register(constraints.independent)
170
+ def _biject_to_independent(constraint):
171
+ base_transform = biject_to(constraint.base_constraint)
172
+ return transforms.IndependentTransform(
173
+ base_transform, constraint.reinterpreted_batch_ndims
174
+ )
175
+
176
+
177
+ @transform_to.register(constraints.independent)
178
+ def _transform_to_independent(constraint):
179
+ base_transform = transform_to(constraint.base_constraint)
180
+ return transforms.IndependentTransform(
181
+ base_transform, constraint.reinterpreted_batch_ndims
182
+ )
183
+
184
+
185
+ @biject_to.register(constraints.positive)
186
+ @biject_to.register(constraints.nonnegative)
187
+ @transform_to.register(constraints.positive)
188
+ @transform_to.register(constraints.nonnegative)
189
+ def _transform_to_positive(constraint):
190
+ return transforms.ExpTransform()
191
+
192
+
193
+ @biject_to.register(constraints.greater_than)
194
+ @biject_to.register(constraints.greater_than_eq)
195
+ @transform_to.register(constraints.greater_than)
196
+ @transform_to.register(constraints.greater_than_eq)
197
+ def _transform_to_greater_than(constraint):
198
+ return transforms.ComposeTransform(
199
+ [
200
+ transforms.ExpTransform(),
201
+ transforms.AffineTransform(constraint.lower_bound, 1),
202
+ ]
203
+ )
204
+
205
+
206
+ @biject_to.register(constraints.less_than)
207
+ @transform_to.register(constraints.less_than)
208
+ def _transform_to_less_than(constraint):
209
+ return transforms.ComposeTransform(
210
+ [
211
+ transforms.ExpTransform(),
212
+ transforms.AffineTransform(constraint.upper_bound, -1),
213
+ ]
214
+ )
215
+
216
+
217
+ @biject_to.register(constraints.interval)
218
+ @biject_to.register(constraints.half_open_interval)
219
+ @transform_to.register(constraints.interval)
220
+ @transform_to.register(constraints.half_open_interval)
221
+ def _transform_to_interval(constraint):
222
+ # Handle the special case of the unit interval.
223
+ lower_is_0 = (
224
+ isinstance(constraint.lower_bound, numbers.Number)
225
+ and constraint.lower_bound == 0
226
+ )
227
+ upper_is_1 = (
228
+ isinstance(constraint.upper_bound, numbers.Number)
229
+ and constraint.upper_bound == 1
230
+ )
231
+ if lower_is_0 and upper_is_1:
232
+ return transforms.SigmoidTransform()
233
+
234
+ loc = constraint.lower_bound
235
+ scale = constraint.upper_bound - constraint.lower_bound
236
+ return transforms.ComposeTransform(
237
+ [transforms.SigmoidTransform(), transforms.AffineTransform(loc, scale)]
238
+ )
239
+
240
+
241
+ @biject_to.register(constraints.simplex)
242
+ def _biject_to_simplex(constraint):
243
+ return transforms.StickBreakingTransform()
244
+
245
+
246
+ @transform_to.register(constraints.simplex)
247
+ def _transform_to_simplex(constraint):
248
+ return transforms.SoftmaxTransform()
249
+
250
+
251
+ # TODO define a bijection for LowerCholeskyTransform
252
+ @transform_to.register(constraints.lower_cholesky)
253
+ def _transform_to_lower_cholesky(constraint):
254
+ return transforms.LowerCholeskyTransform()
255
+
256
+
257
+ @transform_to.register(constraints.positive_definite)
258
+ @transform_to.register(constraints.positive_semidefinite)
259
+ def _transform_to_positive_definite(constraint):
260
+ return transforms.PositiveDefiniteTransform()
261
+
262
+
263
+ @biject_to.register(constraints.corr_cholesky)
264
+ @transform_to.register(constraints.corr_cholesky)
265
+ def _transform_to_corr_cholesky(constraint):
266
+ return transforms.CorrCholeskyTransform()
267
+
268
+
269
+ @biject_to.register(constraints.cat)
270
+ def _biject_to_cat(constraint):
271
+ return transforms.CatTransform(
272
+ [biject_to(c) for c in constraint.cseq], constraint.dim, constraint.lengths
273
+ )
274
+
275
+
276
+ @transform_to.register(constraints.cat)
277
+ def _transform_to_cat(constraint):
278
+ return transforms.CatTransform(
279
+ [transform_to(c) for c in constraint.cseq], constraint.dim, constraint.lengths
280
+ )
281
+
282
+
283
+ @biject_to.register(constraints.stack)
284
+ def _biject_to_stack(constraint):
285
+ return transforms.StackTransform(
286
+ [biject_to(c) for c in constraint.cseq], constraint.dim
287
+ )
288
+
289
+
290
+ @transform_to.register(constraints.stack)
291
+ def _transform_to_stack(constraint):
292
+ return transforms.StackTransform(
293
+ [transform_to(c) for c in constraint.cseq], constraint.dim
294
+ )
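A runnable version of the `transform_to` optimization pattern from the module docstring: fit a Normal's positive scale by optimizing an unconstrained parameter (the data here is synthetic, purely for illustration)::

    import torch
    from torch.distributions import Normal, transform_to

    data = 1.0 + 2.5 * torch.randn(1000)

    loc = torch.zeros(1, requires_grad=True)
    unconstrained = torch.zeros(1, requires_grad=True)
    opt = torch.optim.Adam([loc, unconstrained], lr=0.05)

    to_scale = transform_to(Normal.arg_constraints["scale"])  # maps R -> (0, inf)
    for _ in range(500):
        opt.zero_grad()
        loss = -Normal(loc, to_scale(unconstrained)).log_prob(data).sum()
        loss.backward()
        opt.step()

    print(loc.item(), to_scale(unconstrained).item())  # roughly the sample mean and std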
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py ADDED
@@ -0,0 +1,238 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ from numbers import Number
4
+
5
+ import torch
6
+ from torch.distributions import constraints
7
+ from torch.distributions.exp_family import ExponentialFamily
8
+ from torch.distributions.utils import (
9
+ broadcast_all,
10
+ clamp_probs,
11
+ lazy_property,
12
+ logits_to_probs,
13
+ probs_to_logits,
14
+ )
15
+ from torch.nn.functional import binary_cross_entropy_with_logits
16
+ from torch.types import _size
17
+
18
+
19
+ __all__ = ["ContinuousBernoulli"]
20
+
21
+
22
+ class ContinuousBernoulli(ExponentialFamily):
23
+ r"""
24
+ Creates a continuous Bernoulli distribution parameterized by :attr:`probs`
25
+ or :attr:`logits` (but not both).
26
+
27
+ The distribution is supported in [0, 1] and parameterized by 'probs' (in
28
+ (0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs'
29
+ does not correspond to a probability and 'logits' does not correspond to
30
+ log-odds, but the same names are used due to the similarity with the
31
+ Bernoulli. See [1] for more details.
32
+
33
+ Example::
34
+
35
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
36
+ >>> m = ContinuousBernoulli(torch.tensor([0.3]))
37
+ >>> m.sample()
38
+ tensor([ 0.2538])
39
+
40
+ Args:
41
+ probs (Number, Tensor): (0,1) valued parameters
42
+ logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs'
43
+
44
+ [1] The continuous Bernoulli: fixing a pervasive error in variational
45
+ autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019.
46
+ https://arxiv.org/abs/1907.06845
47
+ """
48
+ arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
49
+ support = constraints.unit_interval
50
+ _mean_carrier_measure = 0
51
+ has_rsample = True
52
+
53
+ def __init__(
54
+ self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None
55
+ ):
56
+ if (probs is None) == (logits is None):
57
+ raise ValueError(
58
+ "Either `probs` or `logits` must be specified, but not both."
59
+ )
60
+ if probs is not None:
61
+ is_scalar = isinstance(probs, Number)
62
+ (self.probs,) = broadcast_all(probs)
63
+ # validate 'probs' here if necessary, since it is clamped close to 0 and 1
64
+ # for numerical stability later on; otherwise the clamped 'probs' would always pass
65
+ if validate_args is not None:
66
+ if not self.arg_constraints["probs"].check(self.probs).all():
67
+ raise ValueError("The parameter probs has invalid values")
68
+ self.probs = clamp_probs(self.probs)
69
+ else:
70
+ is_scalar = isinstance(logits, Number)
71
+ (self.logits,) = broadcast_all(logits)
72
+ self._param = self.probs if probs is not None else self.logits
73
+ if is_scalar:
74
+ batch_shape = torch.Size()
75
+ else:
76
+ batch_shape = self._param.size()
77
+ self._lims = lims
78
+ super().__init__(batch_shape, validate_args=validate_args)
79
+
80
+ def expand(self, batch_shape, _instance=None):
81
+ new = self._get_checked_instance(ContinuousBernoulli, _instance)
82
+ new._lims = self._lims
83
+ batch_shape = torch.Size(batch_shape)
84
+ if "probs" in self.__dict__:
85
+ new.probs = self.probs.expand(batch_shape)
86
+ new._param = new.probs
87
+ if "logits" in self.__dict__:
88
+ new.logits = self.logits.expand(batch_shape)
89
+ new._param = new.logits
90
+ super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False)
91
+ new._validate_args = self._validate_args
92
+ return new
93
+
94
+ def _new(self, *args, **kwargs):
95
+ return self._param.new(*args, **kwargs)
96
+
97
+ def _outside_unstable_region(self):
98
+ return torch.max(
99
+ torch.le(self.probs, self._lims[0]), torch.gt(self.probs, self._lims[1])
100
+ )
101
+
102
+ def _cut_probs(self):
103
+ return torch.where(
104
+ self._outside_unstable_region(),
105
+ self.probs,
106
+ self._lims[0] * torch.ones_like(self.probs),
107
+ )
108
+
109
+ def _cont_bern_log_norm(self):
110
+ """computes the log normalizing constant as a function of the 'probs' parameter"""
111
+ cut_probs = self._cut_probs()
112
+ cut_probs_below_half = torch.where(
113
+ torch.le(cut_probs, 0.5), cut_probs, torch.zeros_like(cut_probs)
114
+ )
115
+ cut_probs_above_half = torch.where(
116
+ torch.ge(cut_probs, 0.5), cut_probs, torch.ones_like(cut_probs)
117
+ )
118
+ log_norm = torch.log(
119
+ torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))
120
+ ) - torch.where(
121
+ torch.le(cut_probs, 0.5),
122
+ torch.log1p(-2.0 * cut_probs_below_half),
123
+ torch.log(2.0 * cut_probs_above_half - 1.0),
124
+ )
125
+ x = torch.pow(self.probs - 0.5, 2)
126
+ taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x
127
+ return torch.where(self._outside_unstable_region(), log_norm, taylor)
128
+
129
+ @property
130
+ def mean(self):
131
+ cut_probs = self._cut_probs()
132
+ mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / (
133
+ torch.log1p(-cut_probs) - torch.log(cut_probs)
134
+ )
135
+ x = self.probs - 0.5
136
+ taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x
137
+ return torch.where(self._outside_unstable_region(), mus, taylor)
138
+
139
+ @property
140
+ def stddev(self):
141
+ return torch.sqrt(self.variance)
142
+
143
+ @property
144
+ def variance(self):
145
+ cut_probs = self._cut_probs()
146
+ vars = cut_probs * (cut_probs - 1.0) / torch.pow(
147
+ 1.0 - 2.0 * cut_probs, 2
148
+ ) + 1.0 / torch.pow(torch.log1p(-cut_probs) - torch.log(cut_probs), 2)
149
+ x = torch.pow(self.probs - 0.5, 2)
150
+ taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128.0 / 945.0 * x) * x
151
+ return torch.where(self._outside_unstable_region(), vars, taylor)
152
+
153
+ @lazy_property
154
+ def logits(self):
155
+ return probs_to_logits(self.probs, is_binary=True)
156
+
157
+ @lazy_property
158
+ def probs(self):
159
+ return clamp_probs(logits_to_probs(self.logits, is_binary=True))
160
+
161
+ @property
162
+ def param_shape(self):
163
+ return self._param.size()
164
+
165
+ def sample(self, sample_shape=torch.Size()):
166
+ shape = self._extended_shape(sample_shape)
167
+ u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
168
+ with torch.no_grad():
169
+ return self.icdf(u)
170
+
171
+ def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
172
+ shape = self._extended_shape(sample_shape)
173
+ u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
174
+ return self.icdf(u)
175
+
176
+ def log_prob(self, value):
177
+ if self._validate_args:
178
+ self._validate_sample(value)
179
+ logits, value = broadcast_all(self.logits, value)
180
+ return (
181
+ -binary_cross_entropy_with_logits(logits, value, reduction="none")
182
+ + self._cont_bern_log_norm()
183
+ )
184
+
185
+ def cdf(self, value):
186
+ if self._validate_args:
187
+ self._validate_sample(value)
188
+ cut_probs = self._cut_probs()
189
+ cdfs = (
190
+ torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value)
191
+ + cut_probs
192
+ - 1.0
193
+ ) / (2.0 * cut_probs - 1.0)
194
+ unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value)
195
+ return torch.where(
196
+ torch.le(value, 0.0),
197
+ torch.zeros_like(value),
198
+ torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs),
199
+ )
200
+
201
+ def icdf(self, value):
202
+ cut_probs = self._cut_probs()
203
+ return torch.where(
204
+ self._outside_unstable_region(),
205
+ (
206
+ torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0))
207
+ - torch.log1p(-cut_probs)
208
+ )
209
+ / (torch.log(cut_probs) - torch.log1p(-cut_probs)),
210
+ value,
211
+ )
212
+
213
+ def entropy(self):
214
+ log_probs0 = torch.log1p(-self.probs)
215
+ log_probs1 = torch.log(self.probs)
216
+ return (
217
+ self.mean * (log_probs0 - log_probs1)
218
+ - self._cont_bern_log_norm()
219
+ - log_probs0
220
+ )
221
+
222
+ @property
223
+ def _natural_params(self):
224
+ return (self.logits,)
225
+
226
+ def _log_normalizer(self, x):
227
+ """computes the log normalizing constant as a function of the natural parameter"""
228
+ out_unst_reg = torch.max(
229
+ torch.le(x, self._lims[0] - 0.5), torch.gt(x, self._lims[1] - 0.5)
230
+ )
231
+ cut_nat_params = torch.where(
232
+ out_unst_reg, x, (self._lims[0] - 0.5) * torch.ones_like(x)
233
+ )
234
+ log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log(
235
+ torch.abs(cut_nat_params)
236
+ )
237
+ taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0
238
+ return torch.where(out_unst_reg, log_norm, taylor)
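The Taylor-expansion branch above exists because the closed-form normalizer is unstable near probs = 0.5; a crude quadrature check that the density still integrates to (approximately) one on both branches::

    import torch
    from torch.distributions import ContinuousBernoulli

    x = torch.linspace(1e-4, 1.0 - 1e-4, 20001)
    for p in (0.2, 0.4999, 0.8):   # 0.4999 falls inside the unstable region (0.499, 0.501)
        d = ContinuousBernoulli(probs=torch.tensor(p))
        print(p, torch.trapezoid(d.log_prob(x).exp(), x).item())  # each close to 1.0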
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/dirichlet.py ADDED
@@ -0,0 +1,126 @@
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.autograd import Function
4
+ from torch.autograd.function import once_differentiable
5
+ from torch.distributions import constraints
6
+ from torch.distributions.exp_family import ExponentialFamily
7
+ from torch.types import _size
8
+
9
+
10
+ __all__ = ["Dirichlet"]
11
+
12
+
13
+ # This helper is exposed for testing.
14
+ def _Dirichlet_backward(x, concentration, grad_output):
15
+ total = concentration.sum(-1, True).expand_as(concentration)
16
+ grad = torch._dirichlet_grad(x, concentration, total)
17
+ return grad * (grad_output - (x * grad_output).sum(-1, True))
18
+
19
+
20
+ class _Dirichlet(Function):
21
+ @staticmethod
22
+ def forward(ctx, concentration):
23
+ x = torch._sample_dirichlet(concentration)
24
+ ctx.save_for_backward(x, concentration)
25
+ return x
26
+
27
+ @staticmethod
28
+ @once_differentiable
29
+ def backward(ctx, grad_output):
30
+ x, concentration = ctx.saved_tensors
31
+ return _Dirichlet_backward(x, concentration, grad_output)
32
+
33
+
34
+ class Dirichlet(ExponentialFamily):
35
+ r"""
36
+ Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.
37
+
38
+ Example::
39
+
40
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
41
+ >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
42
+ >>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5]
43
+ tensor([ 0.1046, 0.8954])
44
+
45
+ Args:
46
+ concentration (Tensor): concentration parameter of the distribution
47
+ (often referred to as alpha)
48
+ """
49
+ arg_constraints = {
50
+ "concentration": constraints.independent(constraints.positive, 1)
51
+ }
52
+ support = constraints.simplex
53
+ has_rsample = True
54
+
55
+ def __init__(self, concentration, validate_args=None):
56
+ if concentration.dim() < 1:
57
+ raise ValueError(
58
+ "`concentration` parameter must be at least one-dimensional."
59
+ )
60
+ self.concentration = concentration
61
+ batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
62
+ super().__init__(batch_shape, event_shape, validate_args=validate_args)
63
+
64
+ def expand(self, batch_shape, _instance=None):
65
+ new = self._get_checked_instance(Dirichlet, _instance)
66
+ batch_shape = torch.Size(batch_shape)
67
+ new.concentration = self.concentration.expand(batch_shape + self.event_shape)
68
+ super(Dirichlet, new).__init__(
69
+ batch_shape, self.event_shape, validate_args=False
70
+ )
71
+ new._validate_args = self._validate_args
72
+ return new
73
+
74
+ def rsample(self, sample_shape: _size = ()) -> torch.Tensor:
75
+ shape = self._extended_shape(sample_shape)
76
+ concentration = self.concentration.expand(shape)
77
+ return _Dirichlet.apply(concentration)
78
+
79
+ def log_prob(self, value):
80
+ if self._validate_args:
81
+ self._validate_sample(value)
82
+ return (
83
+ torch.xlogy(self.concentration - 1.0, value).sum(-1)
84
+ + torch.lgamma(self.concentration.sum(-1))
85
+ - torch.lgamma(self.concentration).sum(-1)
86
+ )
87
+
88
+ @property
89
+ def mean(self):
90
+ return self.concentration / self.concentration.sum(-1, True)
91
+
92
+ @property
93
+ def mode(self):
94
+ concentrationm1 = (self.concentration - 1).clamp(min=0.0)
95
+ mode = concentrationm1 / concentrationm1.sum(-1, True)
96
+ mask = (self.concentration < 1).all(axis=-1)
97
+ mode[mask] = torch.nn.functional.one_hot(
98
+ mode[mask].argmax(axis=-1), concentrationm1.shape[-1]
99
+ ).to(mode)
100
+ return mode
101
+
102
+ @property
103
+ def variance(self):
104
+ con0 = self.concentration.sum(-1, True)
105
+ return (
106
+ self.concentration
107
+ * (con0 - self.concentration)
108
+ / (con0.pow(2) * (con0 + 1))
109
+ )
110
+
111
+ def entropy(self):
112
+ k = self.concentration.size(-1)
113
+ a0 = self.concentration.sum(-1)
114
+ return (
115
+ torch.lgamma(self.concentration).sum(-1)
116
+ - torch.lgamma(a0)
117
+ - (k - a0) * torch.digamma(a0)
118
+ - ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1)
119
+ )
120
+
121
+ @property
122
+ def _natural_params(self):
123
+ return (self.concentration,)
124
+
125
+ def _log_normalizer(self, x):
126
+ return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
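A short usage sketch (illustrative only, not part of the upstream diff) exercising the reparameterized sampling path implemented by `_Dirichlet` above: gradients flow from `log_prob` back to the concentration parameter.

    import torch
    from torch.distributions import Dirichlet

    concentration = torch.tensor([0.5, 1.0, 2.0], requires_grad=True)
    d = Dirichlet(concentration)
    x = d.rsample()             # pathwise sample via _Dirichlet.apply
    d.log_prob(x).backward()    # backpropagates through _Dirichlet_backward
    print(concentration.grad)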
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/distribution.py ADDED
@@ -0,0 +1,340 @@
1
+ # mypy: allow-untyped-defs
2
+ import warnings
3
+ from typing import Any, Dict, Optional
4
+ from typing_extensions import deprecated
5
+
6
+ import torch
7
+ from torch.distributions import constraints
8
+ from torch.distributions.utils import lazy_property
9
+ from torch.types import _size
10
+
11
+
12
+ __all__ = ["Distribution"]
13
+
14
+
15
+ class Distribution:
16
+ r"""
17
+ Distribution is the abstract base class for probability distributions.
18
+ """
19
+
20
+ has_rsample = False
21
+ has_enumerate_support = False
22
+ _validate_args = __debug__
23
+
24
+ @staticmethod
25
+ def set_default_validate_args(value: bool) -> None:
26
+ """
27
+ Sets whether validation is enabled or disabled.
28
+
29
+ The default behavior mimics Python's ``assert`` statement: validation
30
+ is on by default, but is disabled if Python is run in optimized mode
31
+ (via ``python -O``). Validation may be expensive, so you may want to
32
+ disable it once a model is working.
33
+
34
+ Args:
35
+ value (bool): Whether to enable validation.
36
+ """
37
+ if value not in [True, False]:
38
+ raise ValueError
39
+ Distribution._validate_args = value
40
+
41
+ def __init__(
42
+ self,
43
+ batch_shape: torch.Size = torch.Size(),
44
+ event_shape: torch.Size = torch.Size(),
45
+ validate_args: Optional[bool] = None,
46
+ ):
47
+ self._batch_shape = batch_shape
48
+ self._event_shape = event_shape
49
+ if validate_args is not None:
50
+ self._validate_args = validate_args
51
+ if self._validate_args:
52
+ try:
53
+ arg_constraints = self.arg_constraints
54
+ except NotImplementedError:
55
+ arg_constraints = {}
56
+ warnings.warn(
57
+ f"{self.__class__} does not define `arg_constraints`. "
58
+ + "Please set `arg_constraints = {}` or initialize the distribution "
59
+ + "with `validate_args=False` to turn off validation."
60
+ )
61
+ for param, constraint in arg_constraints.items():
62
+ if constraints.is_dependent(constraint):
63
+ continue # skip constraints that cannot be checked
64
+ if param not in self.__dict__ and isinstance(
65
+ getattr(type(self), param), lazy_property
66
+ ):
67
+ continue # skip checking lazily-constructed args
68
+ value = getattr(self, param)
69
+ valid = constraint.check(value)
70
+ if not valid.all():
71
+ raise ValueError(
72
+ f"Expected parameter {param} "
73
+ f"({type(value).__name__} of shape {tuple(value.shape)}) "
74
+ f"of distribution {repr(self)} "
75
+ f"to satisfy the constraint {repr(constraint)}, "
76
+ f"but found invalid values:\n{value}"
77
+ )
78
+ super().__init__()
79
+
80
+ def expand(self, batch_shape: _size, _instance=None):
81
+ """
82
+ Returns a new distribution instance (or populates an existing instance
83
+ provided by a derived class) with batch dimensions expanded to
84
+ `batch_shape`. This method calls :class:`~torch.Tensor.expand` on
85
+ the distribution's parameters. As such, this does not allocate new
86
+ memory for the expanded distribution instance. Additionally,
87
+ this does not repeat any args checking or parameter broadcasting in
88
+ `__init__.py`, when an instance is first created.
89
+
90
+ Args:
91
+ batch_shape (torch.Size): the desired expanded size.
92
+ _instance: new instance provided by subclasses that
93
+ need to override `.expand`.
94
+
95
+ Returns:
96
+ New distribution instance with batch dimensions expanded to
97
+ `batch_size`.
98
+ """
99
+ raise NotImplementedError
100
+
101
+ @property
102
+ def batch_shape(self) -> torch.Size:
103
+ """
104
+ Returns the shape over which parameters are batched.
105
+ """
106
+ return self._batch_shape
107
+
108
+ @property
109
+ def event_shape(self) -> torch.Size:
110
+ """
111
+ Returns the shape of a single sample (without batching).
112
+ """
113
+ return self._event_shape
114
+
115
+ @property
116
+ def arg_constraints(self) -> Dict[str, constraints.Constraint]:
117
+ """
118
+ Returns a dictionary from argument names to
119
+ :class:`~torch.distributions.constraints.Constraint` objects that
120
+ should be satisfied by each argument of this distribution. Args that
121
+ are not tensors need not appear in this dict.
122
+ """
123
+ raise NotImplementedError
124
+
125
+ @property
126
+ def support(self) -> Optional[Any]:
127
+ """
128
+ Returns a :class:`~torch.distributions.constraints.Constraint` object
129
+ representing this distribution's support.
130
+ """
131
+ raise NotImplementedError
132
+
133
+ @property
134
+ def mean(self) -> torch.Tensor:
135
+ """
136
+ Returns the mean of the distribution.
137
+ """
138
+ raise NotImplementedError
139
+
140
+ @property
141
+ def mode(self) -> torch.Tensor:
142
+ """
143
+ Returns the mode of the distribution.
144
+ """
145
+ raise NotImplementedError(f"{self.__class__} does not implement mode")
146
+
147
+ @property
148
+ def variance(self) -> torch.Tensor:
149
+ """
150
+ Returns the variance of the distribution.
151
+ """
152
+ raise NotImplementedError
153
+
154
+ @property
155
+ def stddev(self) -> torch.Tensor:
156
+ """
157
+ Returns the standard deviation of the distribution.
158
+ """
159
+ return self.variance.sqrt()
160
+
161
+ def sample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
162
+ """
163
+ Generates a sample_shape shaped sample or sample_shape shaped batch of
164
+ samples if the distribution parameters are batched.
165
+ """
166
+ with torch.no_grad():
167
+ return self.rsample(sample_shape)
168
+
169
+ def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
170
+ """
171
+ Generates a sample_shape shaped reparameterized sample or sample_shape
172
+ shaped batch of reparameterized samples if the distribution parameters
173
+ are batched.
174
+ """
175
+ raise NotImplementedError
176
+
177
+ @deprecated(
178
+ "`sample_n(n)` will be deprecated. Use `sample((n,))` instead.",
179
+ category=FutureWarning,
180
+ )
181
+ def sample_n(self, n: int) -> torch.Tensor:
182
+ """
183
+ Generates n samples or n batches of samples if the distribution
184
+ parameters are batched.
185
+ """
186
+ return self.sample(torch.Size((n,)))
187
+
188
+ def log_prob(self, value: torch.Tensor) -> torch.Tensor:
189
+ """
190
+ Returns the log of the probability density/mass function evaluated at
191
+ `value`.
192
+
193
+ Args:
194
+ value (Tensor):
195
+ """
196
+ raise NotImplementedError
197
+
198
+ def cdf(self, value: torch.Tensor) -> torch.Tensor:
199
+ """
200
+ Returns the cumulative density/mass function evaluated at
201
+ `value`.
202
+
203
+ Args:
204
+ value (Tensor):
205
+ """
206
+ raise NotImplementedError
207
+
208
+ def icdf(self, value: torch.Tensor) -> torch.Tensor:
209
+ """
210
+ Returns the inverse cumulative density/mass function evaluated at
211
+ `value`.
212
+
213
+ Args:
214
+ value (Tensor):
215
+ """
216
+ raise NotImplementedError
217
+
218
+ def enumerate_support(self, expand: bool = True) -> torch.Tensor:
219
+ """
220
+ Returns tensor containing all values supported by a discrete
221
+ distribution. The result will enumerate over dimension 0, so the shape
222
+ of the result will be `(cardinality,) + batch_shape + event_shape`
223
+ (where `event_shape = ()` for univariate distributions).
224
+
225
+ Note that this enumerates over all batched tensors in lock-step
226
+ `[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
227
+ along dim 0, but with the remaining batch dimensions being
228
+ singleton dimensions, `[[0], [1], ...]`.
229
+
230
+ To iterate over the full Cartesian product use
231
+ `itertools.product(m.enumerate_support())`.
232
+
233
+ Args:
234
+ expand (bool): whether to expand the support over the
235
+ batch dims to match the distribution's `batch_shape`.
236
+
237
+ Returns:
238
+ Tensor iterating over dimension 0.
239
+ """
240
+ raise NotImplementedError
241
+
242
+ def entropy(self) -> torch.Tensor:
243
+ """
244
+ Returns entropy of distribution, batched over batch_shape.
245
+
246
+ Returns:
247
+ Tensor of shape batch_shape.
248
+ """
249
+ raise NotImplementedError
250
+
251
+ def perplexity(self) -> torch.Tensor:
252
+ """
253
+ Returns perplexity of distribution, batched over batch_shape.
254
+
255
+ Returns:
256
+ Tensor of shape batch_shape.
257
+ """
258
+ return torch.exp(self.entropy())
259
+
260
+ def _extended_shape(self, sample_shape: _size = torch.Size()) -> torch.Size:
261
+ """
262
+ Returns the size of the sample returned by the distribution, given
263
+ a `sample_shape`. Note, that the batch and event shapes of a distribution
264
+ instance are fixed at the time of construction. If this is empty, the
265
+ returned shape is upcast to (1,).
266
+
267
+ Args:
268
+ sample_shape (torch.Size): the size of the sample to be drawn.
269
+ """
270
+ if not isinstance(sample_shape, torch.Size):
271
+ sample_shape = torch.Size(sample_shape)
272
+ return torch.Size(sample_shape + self._batch_shape + self._event_shape)
273
+
274
+ def _validate_sample(self, value: torch.Tensor) -> None:
275
+ """
276
+ Argument validation for distribution methods such as `log_prob`,
277
+ `cdf` and `icdf`. The rightmost dimensions of a value to be
278
+ scored via these methods must agree with the distribution's batch
279
+ and event shapes.
280
+
281
+ Args:
282
+ value (Tensor): the tensor whose log probability is to be
283
+ computed by the `log_prob` method.
284
+ Raises
285
+ ValueError: when the rightmost dimensions of `value` do not match the
286
+ distribution's batch and event shapes.
287
+ """
288
+ if not isinstance(value, torch.Tensor):
289
+ raise ValueError("The value argument to log_prob must be a Tensor")
290
+
291
+ event_dim_start = len(value.size()) - len(self._event_shape)
292
+ if value.size()[event_dim_start:] != self._event_shape:
293
+ raise ValueError(
294
+ f"The right-most size of value must match event_shape: {value.size()} vs {self._event_shape}."
295
+ )
296
+
297
+ actual_shape = value.size()
298
+ expected_shape = self._batch_shape + self._event_shape
299
+ for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
300
+ if i != 1 and j != 1 and i != j:
301
+ raise ValueError(
302
+ f"Value is not broadcastable with batch_shape+event_shape: {actual_shape} vs {expected_shape}."
303
+ )
304
+ try:
305
+ support = self.support
306
+ except NotImplementedError:
307
+ warnings.warn(
308
+ f"{self.__class__} does not define `support` to enable "
309
+ + "sample validation. Please initialize the distribution with "
310
+ + "`validate_args=False` to turn off validation."
311
+ )
312
+ return
313
+ assert support is not None
314
+ valid = support.check(value)
315
+ if not valid.all():
316
+ raise ValueError(
317
+ "Expected value argument "
318
+ f"({type(value).__name__} of shape {tuple(value.shape)}) "
319
+ f"to be within the support ({repr(support)}) "
320
+ f"of the distribution {repr(self)}, "
321
+ f"but found invalid values:\n{value}"
322
+ )
323
+
324
+ def _get_checked_instance(self, cls, _instance=None):
325
+ if _instance is None and type(self).__init__ != cls.__init__:
326
+ raise NotImplementedError(
327
+ f"Subclass {self.__class__.__name__} of {cls.__name__} that defines a custom __init__ method "
328
+ "must also define a custom .expand() method."
329
+ )
330
+ return self.__new__(type(self)) if _instance is None else _instance
331
+
332
+ def __repr__(self) -> str:
333
+ param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
334
+ args_string = ", ".join(
335
+ [
336
+ f"{p}: {self.__dict__[p] if self.__dict__[p].numel() == 1 else self.__dict__[p].size()}"
337
+ for p in param_names
338
+ ]
339
+ )
340
+ return self.__class__.__name__ + "(" + args_string + ")"
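To see the shape bookkeeping of `_extended_shape` and the sample validation toggled by `_validate_args` in action, here is a small sketch with a concrete subclass (illustrative only; `Normal` is used purely as an example):

    import torch
    from torch.distributions import Normal

    d = Normal(torch.zeros(3), torch.ones(3))    # batch_shape=(3,), event_shape=()
    print(d.sample((2,)).shape)                   # torch.Size([2, 3]) = sample_shape + batch_shape
    try:
        d.log_prob(torch.full((3,), float("nan")))
    except ValueError as err:
        print("rejected by _validate_sample:", err)  # only raised while validation is enabled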
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/exp_family.py ADDED
@@ -0,0 +1,64 @@
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.distributions.distribution import Distribution
4
+
5
+
6
+ __all__ = ["ExponentialFamily"]
7
+
8
+
9
+ class ExponentialFamily(Distribution):
10
+ r"""
11
+ ExponentialFamily is the abstract base class for probability distributions belonging to an
12
+ exponential family, whose probability mass/density function has the form is defined below
13
+
14
+ .. math::
15
+
16
+ p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
17
+
18
+ where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
19
+ :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
20
+ measure.
21
+
22
+ Note:
23
+ This class is an intermediary between the `Distribution` class and distributions which belong
24
+ to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
25
+ divergence methods. We use this class to compute the entropy and KL divergence using the AD
26
+ framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
27
+ Cross-entropies of Exponential Families).
28
+ """
29
+
30
+ @property
31
+ def _natural_params(self):
32
+ """
33
+ Abstract method for natural parameters. Returns a tuple of Tensors based
34
+ on the distribution
35
+ """
36
+ raise NotImplementedError
37
+
38
+ def _log_normalizer(self, *natural_params):
39
+ """
40
+ Abstract method for log normalizer function. Returns a log normalizer based on
41
+ the distribution and input
42
+ """
43
+ raise NotImplementedError
44
+
45
+ @property
46
+ def _mean_carrier_measure(self):
47
+ """
48
+ Abstract method for expected carrier measure, which is required for computing
49
+ entropy.
50
+ """
51
+ raise NotImplementedError
52
+
53
+ def entropy(self):
54
+ """
55
+ Method to compute the entropy using Bregman divergence of the log normalizer.
56
+ """
57
+ result = -self._mean_carrier_measure
58
+ nparams = [p.detach().requires_grad_() for p in self._natural_params]
59
+ lg_normal = self._log_normalizer(*nparams)
60
+ gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
61
+ result += lg_normal
62
+ for np, g in zip(nparams, gradients):
63
+ result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)
64
+ return result
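The Bregman-divergence entropy above can be checked against a closed form. A sketch (illustrative, not part of the diff) comparing the generic route with Exponential's analytic entropy 1 - log(rate):

    import torch
    from torch.distributions import Exponential
    from torch.distributions.exp_family import ExponentialFamily

    d = Exponential(torch.tensor([0.5, 2.0]))
    print(ExponentialFamily.entropy(d))   # generic route: differentiates the log normalizer
    print(d.entropy())                    # closed form, 1 - log(rate); the values agree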
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/exponential.py ADDED
@@ -0,0 +1,87 @@
1
+ # mypy: allow-untyped-defs
2
+ from numbers import Number
3
+
4
+ import torch
5
+ from torch.distributions import constraints
6
+ from torch.distributions.exp_family import ExponentialFamily
7
+ from torch.distributions.utils import broadcast_all
8
+ from torch.types import _size
9
+
10
+
11
+ __all__ = ["Exponential"]
12
+
13
+
14
+ class Exponential(ExponentialFamily):
15
+ r"""
16
+ Creates an Exponential distribution parameterized by :attr:`rate`.
17
+
18
+ Example::
19
+
20
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
21
+ >>> m = Exponential(torch.tensor([1.0]))
22
+ >>> m.sample() # Exponential distributed with rate=1
23
+ tensor([ 0.1046])
24
+
25
+ Args:
26
+ rate (float or Tensor): rate = 1 / scale of the distribution
27
+ """
28
+ arg_constraints = {"rate": constraints.positive}
29
+ support = constraints.nonnegative
30
+ has_rsample = True
31
+ _mean_carrier_measure = 0
32
+
33
+ @property
34
+ def mean(self):
35
+ return self.rate.reciprocal()
36
+
37
+ @property
38
+ def mode(self):
39
+ return torch.zeros_like(self.rate)
40
+
41
+ @property
42
+ def stddev(self):
43
+ return self.rate.reciprocal()
44
+
45
+ @property
46
+ def variance(self):
47
+ return self.rate.pow(-2)
48
+
49
+ def __init__(self, rate, validate_args=None):
50
+ (self.rate,) = broadcast_all(rate)
51
+ batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
52
+ super().__init__(batch_shape, validate_args=validate_args)
53
+
54
+ def expand(self, batch_shape, _instance=None):
55
+ new = self._get_checked_instance(Exponential, _instance)
56
+ batch_shape = torch.Size(batch_shape)
57
+ new.rate = self.rate.expand(batch_shape)
58
+ super(Exponential, new).__init__(batch_shape, validate_args=False)
59
+ new._validate_args = self._validate_args
60
+ return new
61
+
62
+ def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
63
+ shape = self._extended_shape(sample_shape)
64
+ return self.rate.new(shape).exponential_() / self.rate
65
+
66
+ def log_prob(self, value):
67
+ if self._validate_args:
68
+ self._validate_sample(value)
69
+ return self.rate.log() - self.rate * value
70
+
71
+ def cdf(self, value):
72
+ if self._validate_args:
73
+ self._validate_sample(value)
74
+ return 1 - torch.exp(-self.rate * value)
75
+
76
+ def icdf(self, value):
77
+ return -torch.log1p(-value) / self.rate
78
+
79
+ def entropy(self):
80
+ return 1.0 - torch.log(self.rate)
81
+
82
+ @property
83
+ def _natural_params(self):
84
+ return (-self.rate,)
85
+
86
+ def _log_normalizer(self, x):
87
+ return -torch.log(-x)
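A minimal check (illustrative only) that the `rsample` above, which draws via `Tensor.exponential_`, produces samples with the expected mean 1 / rate:

    import torch
    from torch.distributions import Exponential

    d = Exponential(torch.tensor(2.0))
    samples = d.sample((100_000,))
    print(samples.mean(), d.mean)   # both close to 1 / rate = 0.5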
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/gamma.py ADDED
@@ -0,0 +1,111 @@
1
+ # mypy: allow-untyped-defs
2
+ from numbers import Number
3
+
4
+ import torch
5
+ from torch.distributions import constraints
6
+ from torch.distributions.exp_family import ExponentialFamily
7
+ from torch.distributions.utils import broadcast_all
8
+ from torch.types import _size
9
+
10
+
11
+ __all__ = ["Gamma"]
12
+
13
+
14
+ def _standard_gamma(concentration):
15
+ return torch._standard_gamma(concentration)
16
+
17
+
18
+ class Gamma(ExponentialFamily):
19
+ r"""
20
+ Creates a Gamma distribution parameterized by shape :attr:`concentration` and :attr:`rate`.
21
+
22
+ Example::
23
+
24
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
25
+ >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
26
+ >>> m.sample() # Gamma distributed with concentration=1 and rate=1
27
+ tensor([ 0.1046])
28
+
29
+ Args:
30
+ concentration (float or Tensor): shape parameter of the distribution
31
+ (often referred to as alpha)
32
+ rate (float or Tensor): rate = 1 / scale of the distribution
33
+ (often referred to as beta)
34
+ """
35
+ arg_constraints = {
36
+ "concentration": constraints.positive,
37
+ "rate": constraints.positive,
38
+ }
39
+ support = constraints.nonnegative
40
+ has_rsample = True
41
+ _mean_carrier_measure = 0
42
+
43
+ @property
44
+ def mean(self):
45
+ return self.concentration / self.rate
46
+
47
+ @property
48
+ def mode(self):
49
+ return ((self.concentration - 1) / self.rate).clamp(min=0)
50
+
51
+ @property
52
+ def variance(self):
53
+ return self.concentration / self.rate.pow(2)
54
+
55
+ def __init__(self, concentration, rate, validate_args=None):
56
+ self.concentration, self.rate = broadcast_all(concentration, rate)
57
+ if isinstance(concentration, Number) and isinstance(rate, Number):
58
+ batch_shape = torch.Size()
59
+ else:
60
+ batch_shape = self.concentration.size()
61
+ super().__init__(batch_shape, validate_args=validate_args)
62
+
63
+ def expand(self, batch_shape, _instance=None):
64
+ new = self._get_checked_instance(Gamma, _instance)
65
+ batch_shape = torch.Size(batch_shape)
66
+ new.concentration = self.concentration.expand(batch_shape)
67
+ new.rate = self.rate.expand(batch_shape)
68
+ super(Gamma, new).__init__(batch_shape, validate_args=False)
69
+ new._validate_args = self._validate_args
70
+ return new
71
+
72
+ def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
73
+ shape = self._extended_shape(sample_shape)
74
+ value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(
75
+ shape
76
+ )
77
+ value.detach().clamp_(
78
+ min=torch.finfo(value.dtype).tiny
79
+ ) # do not record in autograd graph
80
+ return value
81
+
82
+ def log_prob(self, value):
83
+ value = torch.as_tensor(value, dtype=self.rate.dtype, device=self.rate.device)
84
+ if self._validate_args:
85
+ self._validate_sample(value)
86
+ return (
87
+ torch.xlogy(self.concentration, self.rate)
88
+ + torch.xlogy(self.concentration - 1, value)
89
+ - self.rate * value
90
+ - torch.lgamma(self.concentration)
91
+ )
92
+
93
+ def entropy(self):
94
+ return (
95
+ self.concentration
96
+ - torch.log(self.rate)
97
+ + torch.lgamma(self.concentration)
98
+ + (1.0 - self.concentration) * torch.digamma(self.concentration)
99
+ )
100
+
101
+ @property
102
+ def _natural_params(self):
103
+ return (self.concentration - 1, -self.rate)
104
+
105
+ def _log_normalizer(self, x, y):
106
+ return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal())
107
+
108
+ def cdf(self, value):
109
+ if self._validate_args:
110
+ self._validate_sample(value)
111
+ return torch.special.gammainc(self.concentration, self.rate * value)
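The `cdf` above is the regularized lower incomplete gamma function evaluated at rate * value; a one-line sanity check (illustrative only):

    import torch
    from torch.distributions import Gamma

    d = Gamma(torch.tensor(2.0), torch.tensor(3.0))
    x = torch.tensor(0.7)
    print(d.cdf(x))
    print(torch.special.gammainc(torch.tensor(2.0), torch.tensor(3.0) * x))  # identical by definition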
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/gumbel.py ADDED
@@ -0,0 +1,83 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ from numbers import Number
4
+
5
+ import torch
6
+ from torch.distributions import constraints
7
+ from torch.distributions.transformed_distribution import TransformedDistribution
8
+ from torch.distributions.transforms import AffineTransform, ExpTransform
9
+ from torch.distributions.uniform import Uniform
10
+ from torch.distributions.utils import broadcast_all, euler_constant
11
+
12
+
13
+ __all__ = ["Gumbel"]
14
+
15
+
16
+ class Gumbel(TransformedDistribution):
17
+ r"""
18
+ Samples from a Gumbel Distribution.
19
+
20
+ Examples::
21
+
22
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
23
+ >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
24
+ >>> m.sample() # sample from Gumbel distribution with loc=1, scale=2
25
+ tensor([ 1.0124])
26
+
27
+ Args:
28
+ loc (float or Tensor): Location parameter of the distribution
29
+ scale (float or Tensor): Scale parameter of the distribution
30
+ """
31
+ arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
32
+ support = constraints.real
33
+
34
+ def __init__(self, loc, scale, validate_args=None):
35
+ self.loc, self.scale = broadcast_all(loc, scale)
36
+ finfo = torch.finfo(self.loc.dtype)
37
+ if isinstance(loc, Number) and isinstance(scale, Number):
38
+ base_dist = Uniform(finfo.tiny, 1 - finfo.eps, validate_args=validate_args)
39
+ else:
40
+ base_dist = Uniform(
41
+ torch.full_like(self.loc, finfo.tiny),
42
+ torch.full_like(self.loc, 1 - finfo.eps),
43
+ validate_args=validate_args,
44
+ )
45
+ transforms = [
46
+ ExpTransform().inv,
47
+ AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
48
+ ExpTransform().inv,
49
+ AffineTransform(loc=loc, scale=-self.scale),
50
+ ]
51
+ super().__init__(base_dist, transforms, validate_args=validate_args)
52
+
53
+ def expand(self, batch_shape, _instance=None):
54
+ new = self._get_checked_instance(Gumbel, _instance)
55
+ new.loc = self.loc.expand(batch_shape)
56
+ new.scale = self.scale.expand(batch_shape)
57
+ return super().expand(batch_shape, _instance=new)
58
+
59
+ # Explicitly defining the log probability function for Gumbel due to precision issues
60
+ def log_prob(self, value):
61
+ if self._validate_args:
62
+ self._validate_sample(value)
63
+ y = (self.loc - value) / self.scale
64
+ return (y - y.exp()) - self.scale.log()
65
+
66
+ @property
67
+ def mean(self):
68
+ return self.loc + self.scale * euler_constant
69
+
70
+ @property
71
+ def mode(self):
72
+ return self.loc
73
+
74
+ @property
75
+ def stddev(self):
76
+ return (math.pi / math.sqrt(6)) * self.scale
77
+
78
+ @property
79
+ def variance(self):
80
+ return self.stddev.pow(2)
81
+
82
+ def entropy(self):
83
+ return self.scale.log() + (1 + euler_constant)
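A brief sketch (illustrative only) confirming the moment properties exposed above: the mean is loc + scale * Euler-Mascheroni constant and the standard deviation is (pi / sqrt(6)) * scale.

    import torch
    from torch.distributions import Gumbel

    d = Gumbel(torch.tensor(1.0), torch.tensor(2.0))
    print(d.mean)     # ~ 1 + 2 * 0.57722
    print(d.stddev)   # ~ (pi / sqrt(6)) * 2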
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/half_cauchy.py ADDED
@@ -0,0 +1,84 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+
4
+ import torch
5
+ from torch import inf
6
+ from torch.distributions import constraints
7
+ from torch.distributions.cauchy import Cauchy
8
+ from torch.distributions.transformed_distribution import TransformedDistribution
9
+ from torch.distributions.transforms import AbsTransform
10
+
11
+
12
+ __all__ = ["HalfCauchy"]
13
+
14
+
15
+ class HalfCauchy(TransformedDistribution):
16
+ r"""
17
+ Creates a half-Cauchy distribution parameterized by `scale` where::
18
+
19
+ X ~ Cauchy(0, scale)
20
+ Y = |X| ~ HalfCauchy(scale)
21
+
22
+ Example::
23
+
24
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
25
+ >>> m = HalfCauchy(torch.tensor([1.0]))
26
+ >>> m.sample() # half-cauchy distributed with scale=1
27
+ tensor([ 2.3214])
28
+
29
+ Args:
30
+ scale (float or Tensor): scale of the full Cauchy distribution
31
+ """
32
+ arg_constraints = {"scale": constraints.positive}
33
+ support = constraints.nonnegative
34
+ has_rsample = True
35
+
36
+ def __init__(self, scale, validate_args=None):
37
+ base_dist = Cauchy(0, scale, validate_args=False)
38
+ super().__init__(base_dist, AbsTransform(), validate_args=validate_args)
39
+
40
+ def expand(self, batch_shape, _instance=None):
41
+ new = self._get_checked_instance(HalfCauchy, _instance)
42
+ return super().expand(batch_shape, _instance=new)
43
+
44
+ @property
45
+ def scale(self):
46
+ return self.base_dist.scale
47
+
48
+ @property
49
+ def mean(self):
50
+ return torch.full(
51
+ self._extended_shape(),
52
+ math.inf,
53
+ dtype=self.scale.dtype,
54
+ device=self.scale.device,
55
+ )
56
+
57
+ @property
58
+ def mode(self):
59
+ return torch.zeros_like(self.scale)
60
+
61
+ @property
62
+ def variance(self):
63
+ return self.base_dist.variance
64
+
65
+ def log_prob(self, value):
66
+ if self._validate_args:
67
+ self._validate_sample(value)
68
+ value = torch.as_tensor(
69
+ value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device
70
+ )
71
+ log_prob = self.base_dist.log_prob(value) + math.log(2)
72
+ log_prob = torch.where(value >= 0, log_prob, -inf)
73
+ return log_prob
74
+
75
+ def cdf(self, value):
76
+ if self._validate_args:
77
+ self._validate_sample(value)
78
+ return 2 * self.base_dist.cdf(value) - 1
79
+
80
+ def icdf(self, prob):
81
+ return self.base_dist.icdf((prob + 1) / 2)
82
+
83
+ def entropy(self):
84
+ return self.base_dist.entropy() - math.log(2)
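Since HalfCauchy is the absolute value of a zero-centered Cauchy, its cdf is the folded base cdf, 2 * F_base(x) - 1, as computed above. A quick check (illustrative only):

    import torch
    from torch.distributions import Cauchy, HalfCauchy

    scale = torch.tensor(1.5)
    half = HalfCauchy(scale)
    base = Cauchy(torch.tensor(0.0), scale)
    x = torch.tensor(2.0)
    print(half.cdf(x))
    print(2 * base.cdf(x) - 1)   # same value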
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/inverse_gamma.py ADDED
@@ -0,0 +1,81 @@
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.distributions import constraints
4
+ from torch.distributions.gamma import Gamma
5
+ from torch.distributions.transformed_distribution import TransformedDistribution
6
+ from torch.distributions.transforms import PowerTransform
7
+
8
+
9
+ __all__ = ["InverseGamma"]
10
+
11
+
12
+ class InverseGamma(TransformedDistribution):
13
+ r"""
14
+ Creates an inverse gamma distribution parameterized by :attr:`concentration` and :attr:`rate`
15
+ where::
16
+
17
+ X ~ Gamma(concentration, rate)
18
+ Y = 1 / X ~ InverseGamma(concentration, rate)
19
+
20
+ Example::
21
+
22
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
23
+ >>> m = InverseGamma(torch.tensor([2.0]), torch.tensor([3.0]))
24
+ >>> m.sample()
25
+ tensor([ 1.2953])
26
+
27
+ Args:
28
+ concentration (float or Tensor): shape parameter of the distribution
29
+ (often referred to as alpha)
30
+ rate (float or Tensor): rate = 1 / scale of the distribution
31
+ (often referred to as beta)
32
+ """
33
+ arg_constraints = {
34
+ "concentration": constraints.positive,
35
+ "rate": constraints.positive,
36
+ }
37
+ support = constraints.positive
38
+ has_rsample = True
39
+
40
+ def __init__(self, concentration, rate, validate_args=None):
41
+ base_dist = Gamma(concentration, rate, validate_args=validate_args)
42
+ neg_one = -base_dist.rate.new_ones(())
43
+ super().__init__(
44
+ base_dist, PowerTransform(neg_one), validate_args=validate_args
45
+ )
46
+
47
+ def expand(self, batch_shape, _instance=None):
48
+ new = self._get_checked_instance(InverseGamma, _instance)
49
+ return super().expand(batch_shape, _instance=new)
50
+
51
+ @property
52
+ def concentration(self):
53
+ return self.base_dist.concentration
54
+
55
+ @property
56
+ def rate(self):
57
+ return self.base_dist.rate
58
+
59
+ @property
60
+ def mean(self):
61
+ result = self.rate / (self.concentration - 1)
62
+ return torch.where(self.concentration > 1, result, torch.inf)
63
+
64
+ @property
65
+ def mode(self):
66
+ return self.rate / (self.concentration + 1)
67
+
68
+ @property
69
+ def variance(self):
70
+ result = self.rate.square() / (
71
+ (self.concentration - 1).square() * (self.concentration - 2)
72
+ )
73
+ return torch.where(self.concentration > 2, result, torch.inf)
74
+
75
+ def entropy(self):
76
+ return (
77
+ self.concentration
78
+ + self.rate.log()
79
+ + self.concentration.lgamma()
80
+ - (1 + self.concentration) * self.concentration.digamma()
81
+ )
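Because InverseGamma is defined as a `PowerTransform(-1)` of Gamma, its density follows from the change of variables f_Y(y) = f_X(1/y) / y^2. A sketch (illustrative only) verifying this against `log_prob`:

    import torch
    from torch.distributions import Gamma, InverseGamma

    conc, rate = torch.tensor(2.0), torch.tensor(3.0)
    inv_gamma = InverseGamma(conc, rate)
    gamma = Gamma(conc, rate)
    y = torch.tensor(0.8)
    manual = gamma.log_prob(1.0 / y) - 2.0 * torch.log(y)   # change-of-variables density
    print(torch.allclose(inv_gamma.log_prob(y), manual))    # True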
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/kl.py ADDED
@@ -0,0 +1,972 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ import warnings
4
+ from functools import total_ordering
5
+ from typing import Callable, Dict, Tuple, Type
6
+
7
+ import torch
8
+ from torch import inf
9
+
10
+ from .bernoulli import Bernoulli
11
+ from .beta import Beta
12
+ from .binomial import Binomial
13
+ from .categorical import Categorical
14
+ from .cauchy import Cauchy
15
+ from .continuous_bernoulli import ContinuousBernoulli
16
+ from .dirichlet import Dirichlet
17
+ from .distribution import Distribution
18
+ from .exp_family import ExponentialFamily
19
+ from .exponential import Exponential
20
+ from .gamma import Gamma
21
+ from .geometric import Geometric
22
+ from .gumbel import Gumbel
23
+ from .half_normal import HalfNormal
24
+ from .independent import Independent
25
+ from .laplace import Laplace
26
+ from .lowrank_multivariate_normal import (
27
+ _batch_lowrank_logdet,
28
+ _batch_lowrank_mahalanobis,
29
+ LowRankMultivariateNormal,
30
+ )
31
+ from .multivariate_normal import _batch_mahalanobis, MultivariateNormal
32
+ from .normal import Normal
33
+ from .one_hot_categorical import OneHotCategorical
34
+ from .pareto import Pareto
35
+ from .poisson import Poisson
36
+ from .transformed_distribution import TransformedDistribution
37
+ from .uniform import Uniform
38
+ from .utils import _sum_rightmost, euler_constant as _euler_gamma
39
+
40
+
41
+ _KL_REGISTRY: Dict[
42
+ Tuple[Type, Type], Callable
43
+ ] = {} # Source of truth mapping a few general (type, type) pairs to functions.
44
+ _KL_MEMOIZE: Dict[
45
+ Tuple[Type, Type], Callable
46
+ ] = {} # Memoized version mapping many specific (type, type) pairs to functions.
47
+
48
+ __all__ = ["register_kl", "kl_divergence"]
49
+
50
+
51
+ def register_kl(type_p, type_q):
52
+ """
53
+ Decorator to register a pairwise function with :meth:`kl_divergence`.
54
+ Usage::
55
+
56
+ @register_kl(Normal, Normal)
57
+ def kl_normal_normal(p, q):
58
+ # insert implementation here
59
+
60
+ Lookup returns the most specific (type,type) match ordered by subclass. If
61
+ the match is ambiguous, a `RuntimeWarning` is raised. For example to
62
+ resolve the ambiguous situation::
63
+
64
+ @register_kl(BaseP, DerivedQ)
65
+ def kl_version1(p, q): ...
66
+ @register_kl(DerivedP, BaseQ)
67
+ def kl_version2(p, q): ...
68
+
69
+ you should register a third most-specific implementation, e.g.::
70
+
71
+ register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie.
72
+
73
+ Args:
74
+ type_p (type): A subclass of :class:`~torch.distributions.Distribution`.
75
+ type_q (type): A subclass of :class:`~torch.distributions.Distribution`.
76
+ """
77
+ if not (isinstance(type_p, type) and issubclass(type_p, Distribution)):
78
+ raise TypeError(
79
+ f"Expected type_p to be a Distribution subclass but got {type_p}"
80
+ )
81
+ if not (isinstance(type_q, type) and issubclass(type_q, Distribution)):
82
+ raise TypeError(
83
+ f"Expected type_q to be a Distribution subclass but got {type_q}"
84
+ )
85
+
86
+ def decorator(fun):
87
+ _KL_REGISTRY[type_p, type_q] = fun
88
+ _KL_MEMOIZE.clear() # reset since lookup order may have changed
89
+ return fun
90
+
91
+ return decorator
92
+
93
+
94
+ @total_ordering
95
+ class _Match:
96
+ __slots__ = ["types"]
97
+
98
+ def __init__(self, *types):
99
+ self.types = types
100
+
101
+ def __eq__(self, other):
102
+ return self.types == other.types
103
+
104
+ def __le__(self, other):
105
+ for x, y in zip(self.types, other.types):
106
+ if not issubclass(x, y):
107
+ return False
108
+ if x is not y:
109
+ break
110
+ return True
111
+
112
+
113
+ def _dispatch_kl(type_p, type_q):
114
+ """
115
+ Find the most specific approximate match, assuming single inheritance.
116
+ """
117
+ matches = [
118
+ (super_p, super_q)
119
+ for super_p, super_q in _KL_REGISTRY
120
+ if issubclass(type_p, super_p) and issubclass(type_q, super_q)
121
+ ]
122
+ if not matches:
123
+ return NotImplemented
124
+ # Check that the left- and right- lexicographic orders agree.
125
+ # mypy isn't smart enough to know that _Match implements __lt__
126
+ # see: https://github.com/python/typing/issues/760#issuecomment-710670503
127
+ left_p, left_q = min(_Match(*m) for m in matches).types # type: ignore[type-var]
128
+ right_q, right_p = min(_Match(*reversed(m)) for m in matches).types # type: ignore[type-var]
129
+ left_fun = _KL_REGISTRY[left_p, left_q]
130
+ right_fun = _KL_REGISTRY[right_p, right_q]
131
+ if left_fun is not right_fun:
132
+ warnings.warn(
133
+ f"Ambiguous kl_divergence({type_p.__name__}, {type_q.__name__}). "
134
+ f"Please register_kl({left_p.__name__}, {right_q.__name__})",
135
+ RuntimeWarning,
136
+ )
137
+ return left_fun
138
+
139
+
140
+ def _infinite_like(tensor):
141
+ """
142
+ Helper function for obtaining infinite KL Divergence throughout
143
+ """
144
+ return torch.full_like(tensor, inf)
145
+
146
+
147
+ def _x_log_x(tensor):
148
+ """
149
+ Utility function for calculating x log x
150
+ """
151
+ return tensor * tensor.log()
152
+
153
+
154
+ def _batch_trace_XXT(bmat):
155
+ """
156
+ Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions
157
+ """
158
+ n = bmat.size(-1)
159
+ m = bmat.size(-2)
160
+ flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)
161
+ return flat_trace.reshape(bmat.shape[:-2])
162
+
163
+
164
+ def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor:
165
+ r"""
166
+ Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions.
167
+
168
+ .. math::
169
+
170
+ KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx
171
+
172
+ Args:
173
+ p (Distribution): A :class:`~torch.distributions.Distribution` object.
174
+ q (Distribution): A :class:`~torch.distributions.Distribution` object.
175
+
176
+ Returns:
177
+ Tensor: A batch of KL divergences of shape `batch_shape`.
178
+
179
+ Raises:
180
+ NotImplementedError: If the distribution types have not been registered via
181
+ :meth:`register_kl`.
182
+ """
183
+ try:
184
+ fun = _KL_MEMOIZE[type(p), type(q)]
185
+ except KeyError:
186
+ fun = _dispatch_kl(type(p), type(q))
187
+ _KL_MEMOIZE[type(p), type(q)] = fun
188
+ if fun is NotImplemented:
189
+ raise NotImplementedError(
190
+ f"No KL(p || q) is implemented for p type {p.__class__.__name__} and q type {q.__class__.__name__}"
191
+ )
192
+ return fun(p, q)
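A short usage sketch (illustrative only) of the dispatch implemented above: `kl_divergence` looks up the registered pairwise function, here `_kl_normal_normal` defined further below, and the result matches the closed form.

    import torch
    from torch.distributions import Normal, kl_divergence

    p = Normal(torch.tensor(0.0), torch.tensor(1.0))
    q = Normal(torch.tensor(1.0), torch.tensor(2.0))
    print(kl_divergence(p, q))   # ~0.4431
    # closed form: log(s_q / s_p) + (s_p**2 + (mu_p - mu_q)**2) / (2 * s_q**2) - 0.5
    print(torch.log(torch.tensor(2.0)) + (1.0 + 1.0) / (2.0 * 4.0) - 0.5)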
193
+
194
+
195
+ ################################################################################
196
+ # KL Divergence Implementations
197
+ ################################################################################
198
+
199
+ # Same distributions
200
+
201
+
202
+ @register_kl(Bernoulli, Bernoulli)
203
+ def _kl_bernoulli_bernoulli(p, q):
204
+ t1 = p.probs * (
205
+ torch.nn.functional.softplus(-q.logits)
206
+ - torch.nn.functional.softplus(-p.logits)
207
+ )
208
+ t1[q.probs == 0] = inf
209
+ t1[p.probs == 0] = 0
210
+ t2 = (1 - p.probs) * (
211
+ torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits)
212
+ )
213
+ t2[q.probs == 1] = inf
214
+ t2[p.probs == 1] = 0
215
+ return t1 + t2
216
+
217
+
218
+ @register_kl(Beta, Beta)
219
+ def _kl_beta_beta(p, q):
220
+ sum_params_p = p.concentration1 + p.concentration0
221
+ sum_params_q = q.concentration1 + q.concentration0
222
+ t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma()
223
+ t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma()
224
+ t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1)
225
+ t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0)
226
+ t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p)
227
+ return t1 - t2 + t3 + t4 + t5
228
+
229
+
230
+ @register_kl(Binomial, Binomial)
231
+ def _kl_binomial_binomial(p, q):
232
+ # from https://math.stackexchange.com/questions/2214993/
233
+ # kullback-leibler-divergence-for-binomial-distributions-p-and-q
234
+ if (p.total_count < q.total_count).any():
235
+ raise NotImplementedError(
236
+ "KL between Binomials where q.total_count > p.total_count is not implemented"
237
+ )
238
+ kl = p.total_count * (
239
+ p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p()
240
+ )
241
+ inf_idxs = p.total_count > q.total_count
242
+ kl[inf_idxs] = _infinite_like(kl[inf_idxs])
243
+ return kl
244
+
245
+
246
+ @register_kl(Categorical, Categorical)
247
+ def _kl_categorical_categorical(p, q):
248
+ t = p.probs * (p.logits - q.logits)
249
+ t[(q.probs == 0).expand_as(t)] = inf
250
+ t[(p.probs == 0).expand_as(t)] = 0
251
+ return t.sum(-1)
252
+
253
+
254
+ @register_kl(ContinuousBernoulli, ContinuousBernoulli)
255
+ def _kl_continuous_bernoulli_continuous_bernoulli(p, q):
256
+ t1 = p.mean * (p.logits - q.logits)
257
+ t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs)
258
+ t3 = -q._cont_bern_log_norm() - torch.log1p(-q.probs)
259
+ return t1 + t2 + t3
260
+
261
+
262
+ @register_kl(Dirichlet, Dirichlet)
263
+ def _kl_dirichlet_dirichlet(p, q):
264
+ # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
265
+ sum_p_concentration = p.concentration.sum(-1)
266
+ sum_q_concentration = q.concentration.sum(-1)
267
+ t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma()
268
+ t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)
269
+ t3 = p.concentration - q.concentration
270
+ t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1)
271
+ return t1 - t2 + (t3 * t4).sum(-1)
272
+
273
+
274
+ @register_kl(Exponential, Exponential)
275
+ def _kl_exponential_exponential(p, q):
276
+ rate_ratio = q.rate / p.rate
277
+ t1 = -rate_ratio.log()
278
+ return t1 + rate_ratio - 1
279
+
280
+
281
+ @register_kl(ExponentialFamily, ExponentialFamily)
282
+ def _kl_expfamily_expfamily(p, q):
283
+ if not type(p) == type(q):
284
+ raise NotImplementedError(
285
+ "The cross KL-divergence between different exponential families cannot \
286
+ be computed using Bregman divergences"
287
+ )
288
+ p_nparams = [np.detach().requires_grad_() for np in p._natural_params]
289
+ q_nparams = q._natural_params
290
+ lg_normal = p._log_normalizer(*p_nparams)
291
+ gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True)
292
+ result = q._log_normalizer(*q_nparams) - lg_normal
293
+ for pnp, qnp, g in zip(p_nparams, q_nparams, gradients):
294
+ term = (qnp - pnp) * g
295
+ result -= _sum_rightmost(term, len(q.event_shape))
296
+ return result
297
+
298
+
299
+ @register_kl(Gamma, Gamma)
300
+ def _kl_gamma_gamma(p, q):
301
+ t1 = q.concentration * (p.rate / q.rate).log()
302
+ t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration)
303
+ t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration)
304
+ t4 = (q.rate - p.rate) * (p.concentration / p.rate)
305
+ return t1 + t2 + t3 + t4
306
+
307
+
308
+ @register_kl(Gumbel, Gumbel)
309
+ def _kl_gumbel_gumbel(p, q):
310
+ ct1 = p.scale / q.scale
311
+ ct2 = q.loc / q.scale
312
+ ct3 = p.loc / q.scale
313
+ t1 = -ct1.log() - ct2 + ct3
314
+ t2 = ct1 * _euler_gamma
315
+ t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3)
316
+ return t1 + t2 + t3 - (1 + _euler_gamma)
317
+
318
+
319
+ @register_kl(Geometric, Geometric)
320
+ def _kl_geometric_geometric(p, q):
321
+ return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits
322
+
323
+
324
+ @register_kl(HalfNormal, HalfNormal)
325
+ def _kl_halfnormal_halfnormal(p, q):
326
+ return _kl_normal_normal(p.base_dist, q.base_dist)
327
+
328
+
329
+ @register_kl(Laplace, Laplace)
330
+ def _kl_laplace_laplace(p, q):
331
+ # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
332
+ scale_ratio = p.scale / q.scale
333
+ loc_abs_diff = (p.loc - q.loc).abs()
334
+ t1 = -scale_ratio.log()
335
+ t2 = loc_abs_diff / q.scale
336
+ t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale)
337
+ return t1 + t2 + t3 - 1
338
+
339
+
340
+ @register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal)
341
+ def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q):
342
+ if p.event_shape != q.event_shape:
343
+ raise ValueError(
344
+ "KL-divergence between two Low Rank Multivariate Normals with\
345
+ different event shapes cannot be computed"
346
+ )
347
+
348
+ term1 = _batch_lowrank_logdet(
349
+ q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril
350
+ ) - _batch_lowrank_logdet(
351
+ p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril
352
+ )
353
+ term3 = _batch_lowrank_mahalanobis(
354
+ q._unbroadcasted_cov_factor,
355
+ q._unbroadcasted_cov_diag,
356
+ q.loc - p.loc,
357
+ q._capacitance_tril,
358
+ )
359
+ # Expands term2 according to
360
+ # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD)
361
+ # = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T)
362
+ qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2)
363
+ A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
364
+ term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1)
365
+ term22 = _batch_trace_XXT(
366
+ p._unbroadcasted_cov_factor * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1)
367
+ )
368
+ term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2))
369
+ term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor))
370
+ term2 = term21 + term22 - term23 - term24
371
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
372
+
373
+
374
+ @register_kl(MultivariateNormal, LowRankMultivariateNormal)
375
+ def _kl_multivariatenormal_lowrankmultivariatenormal(p, q):
376
+ if p.event_shape != q.event_shape:
377
+ raise ValueError(
378
+ "KL-divergence between two (Low Rank) Multivariate Normals with\
379
+ different event shapes cannot be computed"
380
+ )
381
+
382
+ term1 = _batch_lowrank_logdet(
383
+ q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril
384
+ ) - 2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
385
+ term3 = _batch_lowrank_mahalanobis(
386
+ q._unbroadcasted_cov_factor,
387
+ q._unbroadcasted_cov_diag,
388
+ q.loc - p.loc,
389
+ q._capacitance_tril,
390
+ )
391
+ # Expands term2 according to
392
+ # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T
393
+ # = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T
394
+ qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2)
395
+ A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
396
+ term21 = _batch_trace_XXT(
397
+ p._unbroadcasted_scale_tril * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1)
398
+ )
399
+ term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril))
400
+ term2 = term21 - term22
401
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
402
+
403
+
404
+ @register_kl(LowRankMultivariateNormal, MultivariateNormal)
405
+ def _kl_lowrankmultivariatenormal_multivariatenormal(p, q):
406
+ if p.event_shape != q.event_shape:
407
+ raise ValueError(
408
+ "KL-divergence between two (Low Rank) Multivariate Normals with\
409
+ different event shapes cannot be computed"
410
+ )
411
+
412
+ term1 = 2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(
413
+ -1
414
+ ) - _batch_lowrank_logdet(
415
+ p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril
416
+ )
417
+ term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
418
+ # Expands term2 according to
419
+ # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD)
420
+ combined_batch_shape = torch._C._infer_size(
421
+ q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_cov_factor.shape[:-2]
422
+ )
423
+ n = p.event_shape[0]
424
+ q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
425
+ p_cov_factor = p._unbroadcasted_cov_factor.expand(
426
+ combined_batch_shape + (n, p.cov_factor.size(-1))
427
+ )
428
+ p_cov_diag = torch.diag_embed(p._unbroadcasted_cov_diag.sqrt()).expand(
429
+ combined_batch_shape + (n, n)
430
+ )
431
+ term21 = _batch_trace_XXT(
432
+ torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False)
433
+ )
434
+ term22 = _batch_trace_XXT(
435
+ torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False)
436
+ )
437
+ term2 = term21 + term22
438
+ return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
439
+
440
+
441
+ @register_kl(MultivariateNormal, MultivariateNormal)
442
+ def _kl_multivariatenormal_multivariatenormal(p, q):
443
+ # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence
444
+ if p.event_shape != q.event_shape:
445
+ raise ValueError(
446
+ "KL-divergence between two Multivariate Normals with\
447
+ different event shapes cannot be computed"
448
+ )
449
+
450
+ half_term1 = q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(
451
+ -1
452
+ ) - p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
453
+ combined_batch_shape = torch._C._infer_size(
454
+ q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_scale_tril.shape[:-2]
455
+ )
456
+ n = p.event_shape[0]
457
+ q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
458
+ p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
459
+ term2 = _batch_trace_XXT(
460
+ torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False)
461
+ )
462
+ term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
463
+ return half_term1 + 0.5 * (term2 + term3 - n)
464
+
465
+
466
+ @register_kl(Normal, Normal)
467
+ def _kl_normal_normal(p, q):
468
+ var_ratio = (p.scale / q.scale).pow(2)
469
+ t1 = ((p.loc - q.loc) / q.scale).pow(2)
470
+ return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
471
+
472
+
473
+ @register_kl(OneHotCategorical, OneHotCategorical)
474
+ def _kl_onehotcategorical_onehotcategorical(p, q):
475
+ return _kl_categorical_categorical(p._categorical, q._categorical)
476
+
477
+
478
+ @register_kl(Pareto, Pareto)
479
+ def _kl_pareto_pareto(p, q):
480
+ # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
481
+ scale_ratio = p.scale / q.scale
482
+ alpha_ratio = q.alpha / p.alpha
483
+ t1 = q.alpha * scale_ratio.log()
484
+ t2 = -alpha_ratio.log()
485
+ result = t1 + t2 + alpha_ratio - 1
486
+ result[p.support.lower_bound < q.support.lower_bound] = inf
487
+ return result
488
+
489
+
490
+ @register_kl(Poisson, Poisson)
491
+ def _kl_poisson_poisson(p, q):
492
+    return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate)
+
+
+@register_kl(TransformedDistribution, TransformedDistribution)
+def _kl_transformed_transformed(p, q):
+    if p.transforms != q.transforms:
+        raise NotImplementedError
+    if p.event_shape != q.event_shape:
+        raise NotImplementedError
+    return kl_divergence(p.base_dist, q.base_dist)
+
+
+@register_kl(Uniform, Uniform)
+def _kl_uniform_uniform(p, q):
+    result = ((q.high - q.low) / (p.high - p.low)).log()
+    result[(q.low > p.low) | (q.high < p.high)] = inf
+    return result
+
+
+# Different distributions
+@register_kl(Bernoulli, Poisson)
+def _kl_bernoulli_poisson(p, q):
+    return -p.entropy() - (p.probs * q.rate.log() - q.rate)
+
+
+@register_kl(Beta, ContinuousBernoulli)
+def _kl_beta_continuous_bernoulli(p, q):
+    return (
+        -p.entropy()
+        - p.mean * q.logits
+        - torch.log1p(-q.probs)
+        - q._cont_bern_log_norm()
+    )
+
+
+@register_kl(Beta, Pareto)
+def _kl_beta_infinity(p, q):
+    return _infinite_like(p.concentration1)
+
+
+@register_kl(Beta, Exponential)
+def _kl_beta_exponential(p, q):
+    return (
+        -p.entropy()
+        - q.rate.log()
+        + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0))
+    )
+
+
+@register_kl(Beta, Gamma)
+def _kl_beta_gamma(p, q):
+    t1 = -p.entropy()
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (q.concentration - 1) * (
+        p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma()
+    )
+    t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0)
+    return t1 + t2 - t3 + t4
+
+
+# TODO: Add Beta-Laplace KL Divergence
+
+
+@register_kl(Beta, Normal)
+def _kl_beta_normal(p, q):
+    E_beta = p.concentration1 / (p.concentration1 + p.concentration0)
+    var_normal = q.scale.pow(2)
+    t1 = -p.entropy()
+    t2 = 0.5 * (var_normal * 2 * math.pi).log()
+    t3 = (
+        E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1)
+        + E_beta.pow(2)
+    ) * 0.5
+    t4 = q.loc * E_beta
+    t5 = q.loc.pow(2) * 0.5
+    return t1 + t2 + (t3 - t4 + t5) / var_normal
+
+
+@register_kl(Beta, Uniform)
+def _kl_beta_uniform(p, q):
+    result = -p.entropy() + (q.high - q.low).log()
+    result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf
+    return result
+
+
+# Note that the KL between a ContinuousBernoulli and Beta has no closed form
+
+
+@register_kl(ContinuousBernoulli, Pareto)
+def _kl_continuous_bernoulli_infinity(p, q):
+    return _infinite_like(p.probs)
+
+
+@register_kl(ContinuousBernoulli, Exponential)
+def _kl_continuous_bernoulli_exponential(p, q):
+    return -p.entropy() - torch.log(q.rate) + q.rate * p.mean
+
+
+# Note that the KL between a ContinuousBernoulli and Gamma has no closed form
+# TODO: Add ContinuousBernoulli-Laplace KL Divergence
+
+
+@register_kl(ContinuousBernoulli, Normal)
+def _kl_continuous_bernoulli_normal(p, q):
+    t1 = -p.entropy()
+    t2 = 0.5 * (math.log(2.0 * math.pi) + torch.square(q.loc / q.scale)) + torch.log(
+        q.scale
+    )
+    t3 = (p.variance + torch.square(p.mean) - 2.0 * q.loc * p.mean) / (
+        2.0 * torch.square(q.scale)
+    )
+    return t1 + t2 + t3
+
+
+@register_kl(ContinuousBernoulli, Uniform)
+def _kl_continuous_bernoulli_uniform(p, q):
+    result = -p.entropy() + (q.high - q.low).log()
+    return torch.where(
+        torch.max(
+            torch.ge(q.low, p.support.lower_bound),
+            torch.le(q.high, p.support.upper_bound),
+        ),
+        torch.ones_like(result) * inf,
+        result,
+    )
+
+
+@register_kl(Exponential, Beta)
+@register_kl(Exponential, ContinuousBernoulli)
+@register_kl(Exponential, Pareto)
+@register_kl(Exponential, Uniform)
+def _kl_exponential_infinity(p, q):
+    return _infinite_like(p.rate)
+
+
+@register_kl(Exponential, Gamma)
+def _kl_exponential_gamma(p, q):
+    ratio = q.rate / p.rate
+    t1 = -q.concentration * torch.log(ratio)
+    return (
+        t1
+        + ratio
+        + q.concentration.lgamma()
+        + q.concentration * _euler_gamma
+        - (1 + _euler_gamma)
+    )
+
+
+@register_kl(Exponential, Gumbel)
+def _kl_exponential_gumbel(p, q):
+    scale_rate_prod = p.rate * q.scale
+    loc_scale_ratio = q.loc / q.scale
+    t1 = scale_rate_prod.log() - 1
+    t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1)
+    t3 = scale_rate_prod.reciprocal()
+    return t1 - loc_scale_ratio + t2 + t3
+
+
+# TODO: Add Exponential-Laplace KL Divergence
+
+
+@register_kl(Exponential, Normal)
+def _kl_exponential_normal(p, q):
+    var_normal = q.scale.pow(2)
+    rate_sqr = p.rate.pow(2)
+    t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi)
+    t2 = rate_sqr.reciprocal()
+    t3 = q.loc / p.rate
+    t4 = q.loc.pow(2) * 0.5
+    return t1 - 1 + (t2 - t3 + t4) / var_normal
+
+
+@register_kl(Gamma, Beta)
+@register_kl(Gamma, ContinuousBernoulli)
+@register_kl(Gamma, Pareto)
+@register_kl(Gamma, Uniform)
+def _kl_gamma_infinity(p, q):
+    return _infinite_like(p.concentration)
+
+
+@register_kl(Gamma, Exponential)
+def _kl_gamma_exponential(p, q):
+    return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate
+
+
+@register_kl(Gamma, Gumbel)
+def _kl_gamma_gumbel(p, q):
+    beta_scale_prod = p.rate * q.scale
+    loc_scale_ratio = q.loc / q.scale
+    t1 = (
+        (p.concentration - 1) * p.concentration.digamma()
+        - p.concentration.lgamma()
+        - p.concentration
+    )
+    t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod
+    t3 = (
+        torch.exp(loc_scale_ratio)
+        * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration)
+        - loc_scale_ratio
+    )
+    return t1 + t2 + t3
+
+
+# TODO: Add Gamma-Laplace KL Divergence
+
+
+@register_kl(Gamma, Normal)
+def _kl_gamma_normal(p, q):
+    var_normal = q.scale.pow(2)
+    beta_sqr = p.rate.pow(2)
+    t1 = (
+        0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi)
+        - p.concentration
+        - p.concentration.lgamma()
+    )
+    t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr
+    t3 = q.loc * p.concentration / p.rate
+    t4 = 0.5 * q.loc.pow(2)
+    return (
+        t1
+        + (p.concentration - 1) * p.concentration.digamma()
+        + (t2 - t3 + t4) / var_normal
+    )
+
+
+@register_kl(Gumbel, Beta)
+@register_kl(Gumbel, ContinuousBernoulli)
+@register_kl(Gumbel, Exponential)
+@register_kl(Gumbel, Gamma)
+@register_kl(Gumbel, Pareto)
+@register_kl(Gumbel, Uniform)
+def _kl_gumbel_infinity(p, q):
+    return _infinite_like(p.loc)
+
+
+# TODO: Add Gumbel-Laplace KL Divergence
+
+
+@register_kl(Gumbel, Normal)
+def _kl_gumbel_normal(p, q):
+    param_ratio = p.scale / q.scale
+    t1 = (param_ratio / math.sqrt(2 * math.pi)).log()
+    t2 = (math.pi * param_ratio * 0.5).pow(2) / 3
+    t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5
+    return -t1 + t2 + t3 - (_euler_gamma + 1)
+
+
+@register_kl(Laplace, Beta)
+@register_kl(Laplace, ContinuousBernoulli)
+@register_kl(Laplace, Exponential)
+@register_kl(Laplace, Gamma)
+@register_kl(Laplace, Pareto)
+@register_kl(Laplace, Uniform)
+def _kl_laplace_infinity(p, q):
+    return _infinite_like(p.loc)
+
+
+@register_kl(Laplace, Normal)
+def _kl_laplace_normal(p, q):
+    var_normal = q.scale.pow(2)
+    scale_sqr_var_ratio = p.scale.pow(2) / var_normal
+    t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi)
+    t2 = 0.5 * p.loc.pow(2)
+    t3 = p.loc * q.loc
+    t4 = 0.5 * q.loc.pow(2)
+    return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1
+
+
+@register_kl(Normal, Beta)
+@register_kl(Normal, ContinuousBernoulli)
+@register_kl(Normal, Exponential)
+@register_kl(Normal, Gamma)
+@register_kl(Normal, Pareto)
+@register_kl(Normal, Uniform)
+def _kl_normal_infinity(p, q):
+    return _infinite_like(p.loc)
+
+
+@register_kl(Normal, Gumbel)
+def _kl_normal_gumbel(p, q):
+    mean_scale_ratio = p.loc / q.scale
+    var_scale_sqr_ratio = (p.scale / q.scale).pow(2)
+    loc_scale_ratio = q.loc / q.scale
+    t1 = var_scale_sqr_ratio.log() * 0.5
+    t2 = mean_scale_ratio - loc_scale_ratio
+    t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio)
+    return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi)))
+
+
+@register_kl(Normal, Laplace)
+def _kl_normal_laplace(p, q):
+    loc_diff = p.loc - q.loc
+    scale_ratio = p.scale / q.scale
+    loc_diff_scale_ratio = loc_diff / p.scale
+    t1 = torch.log(scale_ratio)
+    t2 = (
+        math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2))
+    )
+    t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio)
+    return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi)))
+
+
+@register_kl(Pareto, Beta)
+@register_kl(Pareto, ContinuousBernoulli)
+@register_kl(Pareto, Uniform)
+def _kl_pareto_infinity(p, q):
+    return _infinite_like(p.scale)
+
+
+@register_kl(Pareto, Exponential)
+def _kl_pareto_exponential(p, q):
+    scale_rate_prod = p.scale * q.rate
+    t1 = (p.alpha / scale_rate_prod).log()
+    t2 = p.alpha.reciprocal()
+    t3 = p.alpha * scale_rate_prod / (p.alpha - 1)
+    result = t1 - t2 + t3 - 1
+    result[p.alpha <= 1] = inf
+    return result
+
+
+@register_kl(Pareto, Gamma)
+def _kl_pareto_gamma(p, q):
+    common_term = p.scale.log() + p.alpha.reciprocal()
+    t1 = p.alpha.log() - common_term
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (1 - q.concentration) * common_term
+    t4 = q.rate * p.alpha * p.scale / (p.alpha - 1)
+    result = t1 + t2 + t3 + t4 - 1
+    result[p.alpha <= 1] = inf
+    return result
+
+
+# TODO: Add Pareto-Laplace KL Divergence
+
+
+@register_kl(Pareto, Normal)
+def _kl_pareto_normal(p, q):
+    var_normal = 2 * q.scale.pow(2)
+    common_term = p.scale / (p.alpha - 1)
+    t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log()
+    t2 = p.alpha.reciprocal()
+    t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
+    t4 = (p.alpha * common_term - q.loc).pow(2)
+    result = t1 - t2 + (t3 + t4) / var_normal - 1
+    result[p.alpha <= 2] = inf
+    return result
+
+
+@register_kl(Poisson, Bernoulli)
+@register_kl(Poisson, Binomial)
+def _kl_poisson_infinity(p, q):
+    return _infinite_like(p.rate)
+
+
+@register_kl(Uniform, Beta)
+def _kl_uniform_beta(p, q):
+    common_term = p.high - p.low
+    t1 = torch.log(common_term)
+    t2 = (
+        (q.concentration1 - 1)
+        * (_x_log_x(p.high) - _x_log_x(p.low) - common_term)
+        / common_term
+    )
+    t3 = (
+        (q.concentration0 - 1)
+        * (_x_log_x(1 - p.high) - _x_log_x(1 - p.low) + common_term)
+        / common_term
+    )
+    t4 = (
+        q.concentration1.lgamma()
+        + q.concentration0.lgamma()
+        - (q.concentration1 + q.concentration0).lgamma()
+    )
+    result = t3 + t4 - t1 - t2
+    result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf
+    return result
+
+
+@register_kl(Uniform, ContinuousBernoulli)
+def _kl_uniform_continuous_bernoulli(p, q):
+    result = (
+        -p.entropy()
+        - p.mean * q.logits
+        - torch.log1p(-q.probs)
+        - q._cont_bern_log_norm()
+    )
+    return torch.where(
+        torch.max(
+            torch.ge(p.high, q.support.upper_bound),
+            torch.le(p.low, q.support.lower_bound),
+        ),
+        torch.ones_like(result) * inf,
+        result,
+    )
+
+
+@register_kl(Uniform, Exponential)
+def _kl_uniform_exponetial(p, q):
+    result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log()
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Uniform, Gamma)
+def _kl_uniform_gamma(p, q):
+    common_term = p.high - p.low
+    t1 = common_term.log()
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (
+        (1 - q.concentration)
+        * (_x_log_x(p.high) - _x_log_x(p.low) - common_term)
+        / common_term
+    )
+    t4 = q.rate * (p.high + p.low) / 2
+    result = -t1 + t2 + t3 + t4
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Uniform, Gumbel)
+def _kl_uniform_gumbel(p, q):
+    common_term = q.scale / (p.high - p.low)
+    high_loc_diff = (p.high - q.loc) / q.scale
+    low_loc_diff = (p.low - q.loc) / q.scale
+    t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
+    t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
+    return t1 - t2
+
+
+# TODO: Uniform-Laplace KL Divergence
+
+
+@register_kl(Uniform, Normal)
+def _kl_uniform_normal(p, q):
+    common_term = p.high - p.low
+    t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log()
+    t2 = (common_term).pow(2) / 12
+    t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2)
+    return t1 + 0.5 * (t2 + t3) / q.scale.pow(2)
+
+
+@register_kl(Uniform, Pareto)
+def _kl_uniform_pareto(p, q):
+    support_uniform = p.high - p.low
+    t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log()
+    t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform
+    result = t2 * (q.alpha + 1) - t1
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Independent, Independent)
+def _kl_independent_independent(p, q):
+    if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
+        raise NotImplementedError
+    result = kl_divergence(p.base_dist, q.base_dist)
+    return _sum_rightmost(result, p.reinterpreted_batch_ndims)
+
+
+@register_kl(Cauchy, Cauchy)
+def _kl_cauchy_cauchy(p, q):
+    # From https://arxiv.org/abs/1905.10965
+    t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log()
+    t2 = (4 * p.scale * q.scale).log()
+    return t1 - t2
+
+
+def _add_kl_info():
+    """Appends a list of implemented KL functions to the doc for kl_divergence."""
+    rows = [
+        "KL divergence is currently implemented for the following distribution pairs:"
+    ]
+    for p, q in sorted(
+        _KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)
+    ):
+        rows.append(
+            f"* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`"
+        )
+    kl_info = "\n\t".join(rows)
+    if kl_divergence.__doc__:
+        kl_divergence.__doc__ += kl_info  # type: ignore[operator]
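
The registrations above are consumed through the public `kl_divergence` entry point, which dispatches on the exact types of its two arguments. A minimal usage sketch, assuming a PyTorch build that ships the file above; the expected values follow directly from the closed forms registered in it:

    # Hedged usage sketch, not part of the committed file.
    import torch
    from torch.distributions import Normal, Uniform, kl_divergence

    p = Uniform(torch.tensor(0.0), torch.tensor(1.0))
    q = Uniform(torch.tensor(-1.0), torch.tensor(2.0))
    # _kl_uniform_uniform gives log((q.high - q.low) / (p.high - p.low)) = log(3)
    print(kl_divergence(p, q))  # tensor(1.0986)

    # Pairs registered via _kl_normal_infinity return +inf because the support
    # of the Normal is not contained in the support of the Uniform.
    print(kl_divergence(Normal(0.0, 1.0), Uniform(0.0, 1.0)))  # tensor(inf)
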
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py ADDED
@@ -0,0 +1,99 @@
+# mypy: allow-untyped-defs
+import torch
+from torch import nan
+from torch.distributions import constraints
+from torch.distributions.transformed_distribution import TransformedDistribution
+from torch.distributions.transforms import AffineTransform, PowerTransform
+from torch.distributions.uniform import Uniform
+from torch.distributions.utils import broadcast_all, euler_constant
+
+
+__all__ = ["Kumaraswamy"]
+
+
+def _moments(a, b, n):
+    """
+    Computes the nth moment of the Kumaraswamy distribution using torch.lgamma
+    """
+    arg1 = 1 + n / a
+    log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b)
+    return b * torch.exp(log_value)
+
+
+class Kumaraswamy(TransformedDistribution):
+    r"""
+    Samples from a Kumaraswamy distribution.
+
+    Example::
+
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0]))
+        >>> m.sample()  # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1
+        tensor([ 0.1729])
+
+    Args:
+        concentration1 (float or Tensor): 1st concentration parameter of the distribution
+            (often referred to as alpha)
+        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
+            (often referred to as beta)
+    """
+    arg_constraints = {
+        "concentration1": constraints.positive,
+        "concentration0": constraints.positive,
+    }
+    support = constraints.unit_interval
+    has_rsample = True
+
+    def __init__(self, concentration1, concentration0, validate_args=None):
+        self.concentration1, self.concentration0 = broadcast_all(
+            concentration1, concentration0
+        )
+        finfo = torch.finfo(self.concentration0.dtype)
+        base_dist = Uniform(
+            torch.full_like(self.concentration0, 0),
+            torch.full_like(self.concentration0, 1),
+            validate_args=validate_args,
+        )
+        transforms = [
+            PowerTransform(exponent=self.concentration0.reciprocal()),
+            AffineTransform(loc=1.0, scale=-1.0),
+            PowerTransform(exponent=self.concentration1.reciprocal()),
+        ]
+        super().__init__(base_dist, transforms, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Kumaraswamy, _instance)
+        new.concentration1 = self.concentration1.expand(batch_shape)
+        new.concentration0 = self.concentration0.expand(batch_shape)
+        return super().expand(batch_shape, _instance=new)
+
+    @property
+    def mean(self):
+        return _moments(self.concentration1, self.concentration0, 1)
+
+    @property
+    def mode(self):
+        # Evaluate in log-space for numerical stability.
+        log_mode = (
+            self.concentration0.reciprocal() * (-self.concentration0).log1p()
+            - (-self.concentration0 * self.concentration1).log1p()
+        )
+        log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan
+        return log_mode.exp()
+
+    @property
+    def variance(self):
+        return _moments(self.concentration1, self.concentration0, 2) - torch.pow(
+            self.mean, 2
+        )
+
+    def entropy(self):
+        t1 = 1 - self.concentration1.reciprocal()
+        t0 = 1 - self.concentration0.reciprocal()
+        H0 = torch.digamma(self.concentration0 + 1) + euler_constant
+        return (
+            t0
+            + t1 * H0
+            - torch.log(self.concentration1)
+            - torch.log(self.concentration0)
+        )
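
Kumaraswamy is built here as a Uniform(0, 1) base pushed through power, affine, and power transforms, so sampling is reparameterized and the closed-form `mean` from `_moments` can be checked against a Monte Carlo estimate. A small sketch, assuming the module above is importable as `torch.distributions.Kumaraswamy`:

    # Hedged sketch, not part of the committed file.
    import torch
    from torch.distributions import Kumaraswamy

    torch.manual_seed(0)
    m = Kumaraswamy(torch.tensor([2.0]), torch.tensor([5.0]))
    samples = m.rsample((100_000,))         # reparameterized, since has_rsample = True
    print(samples.mean(), m.mean)           # the two values should agree closely
    print(m.log_prob(torch.tensor([0.3])))  # density comes from TransformedDistribution
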
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/laplace.py ADDED
@@ -0,0 +1,97 @@
+# mypy: allow-untyped-defs
+from numbers import Number
+
+import torch
+from torch.distributions import constraints
+from torch.distributions.distribution import Distribution
+from torch.distributions.utils import broadcast_all
+from torch.types import _size
+
+
+__all__ = ["Laplace"]
+
+
+class Laplace(Distribution):
+    r"""
+    Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.
+
+    Example::
+
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # Laplace distributed with loc=0, scale=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of the distribution
+        scale (float or Tensor): scale of the distribution
+    """
+    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
+    support = constraints.real
+    has_rsample = True
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @property
+    def mode(self):
+        return self.loc
+
+    @property
+    def variance(self):
+        return 2 * self.scale.pow(2)
+
+    @property
+    def stddev(self):
+        return (2**0.5) * self.scale
+
+    def __init__(self, loc, scale, validate_args=None):
+        self.loc, self.scale = broadcast_all(loc, scale)
+        if isinstance(loc, Number) and isinstance(scale, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.loc.size()
+        super().__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Laplace, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.loc = self.loc.expand(batch_shape)
+        new.scale = self.scale.expand(batch_shape)
+        super(Laplace, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
+        shape = self._extended_shape(sample_shape)
+        finfo = torch.finfo(self.loc.dtype)
+        if torch._C._get_tracing_state():
+            # [JIT WORKAROUND] lack of support for .uniform_()
+            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
+            return self.loc - self.scale * u.sign() * torch.log1p(
+                -u.abs().clamp(min=finfo.tiny)
+            )
+        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
+        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
+        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
+        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale
+
+    def cdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(
+            -(value - self.loc).abs() / self.scale
+        )
+
+    def icdf(self, value):
+        term = value - 0.5
+        return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())
+
+    def entropy(self):
+        return 1 + torch.log(2 * self.scale)
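
The `rsample` above draws `u ~ Uniform(eps - 1, 1)` and applies the inverse CDF `loc - scale * sign(u) * log1p(-|u|)`, so samples are pathwise differentiable in `loc` and `scale`. A short sketch, assuming the class is available as `torch.distributions.Laplace`:

    # Hedged sketch, not part of the committed file.
    import torch
    from torch.distributions import Laplace

    loc = torch.tensor([0.0], requires_grad=True)
    scale = torch.tensor([1.0], requires_grad=True)
    d = Laplace(loc, scale)

    x = d.rsample((10,))   # pathwise samples; gradients flow back to loc and scale
    x.sum().backward()
    print(loc.grad, scale.grad)

    print(d.log_prob(torch.tensor([0.5])))  # -log(2) - 0.5 for loc=0, scale=1
    print(d.entropy())                      # 1 + log(2)
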
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py ADDED
@@ -0,0 +1,144 @@
+# mypy: allow-untyped-defs
+"""
+This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro).
+
+Original copyright notice:
+
+# Copyright: Contributors to the Pyro project.
+# SPDX-License-Identifier: Apache-2.0
+"""
+
+import math
+
+import torch
+from torch.distributions import Beta, constraints
+from torch.distributions.distribution import Distribution
+from torch.distributions.utils import broadcast_all
+
+
+__all__ = ["LKJCholesky"]
+
+
+class LKJCholesky(Distribution):
+    r"""
+    LKJ distribution for lower Cholesky factor of correlation matrices.
+    The distribution is controlled by ``concentration`` parameter :math:`\eta`
+    to make the probability of the correlation matrix :math:`M` generated from
+    a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that,
+    when ``concentration == 1``, we have a uniform distribution over Cholesky
+    factors of correlation matrices::
+
+        L ~ LKJCholesky(dim, concentration)
+        X = L @ L' ~ LKJCorr(dim, concentration)
+
+    Note that this distribution samples the
+    Cholesky factor of correlation matrices and not the correlation matrices
+    themselves and thereby differs slightly from the derivations in [1] for
+    the `LKJCorr` distribution. For sampling, this uses the Onion method from
+    [1] Section 3.
+
+    Example::
+
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> l = LKJCholesky(3, 0.5)
+        >>> l.sample()  # l @ l.T is a sample of a correlation 3x3 matrix
+        tensor([[ 1.0000,  0.0000,  0.0000],
+                [ 0.3516,  0.9361,  0.0000],
+                [-0.1899,  0.4748,  0.8593]])
+
+    Args:
+        dim (int): dimension of the matrices
+        concentration (float or Tensor): concentration/shape parameter of the
+            distribution (often referred to as eta)
+
+    **References**
+
+    [1] `Generating random correlation matrices based on vines and extended onion method` (2009),
+    Daniel Lewandowski, Dorota Kurowicka, Harry Joe.
+    Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008
+    """
+    arg_constraints = {"concentration": constraints.positive}
+    support = constraints.corr_cholesky
+
+    def __init__(self, dim, concentration=1.0, validate_args=None):
+        if dim < 2:
+            raise ValueError(
+                f"Expected dim to be an integer greater than or equal to 2. Found dim={dim}."
+            )
+        self.dim = dim
+        (self.concentration,) = broadcast_all(concentration)
+        batch_shape = self.concentration.size()
+        event_shape = torch.Size((dim, dim))
+        # This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
+        marginal_conc = self.concentration + 0.5 * (self.dim - 2)
+        offset = torch.arange(
+            self.dim - 1,
+            dtype=self.concentration.dtype,
+            device=self.concentration.device,
+        )
+        offset = torch.cat([offset.new_zeros((1,)), offset])
+        beta_conc1 = offset + 0.5
+        beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
+        self._beta = Beta(beta_conc1, beta_conc0)
+        super().__init__(batch_shape, event_shape, validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(LKJCholesky, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.dim = self.dim
+        new.concentration = self.concentration.expand(batch_shape)
+        new._beta = self._beta.expand(batch_shape + (self.dim,))
+        super(LKJCholesky, new).__init__(
+            batch_shape, self.event_shape, validate_args=False
+        )
+        new._validate_args = self._validate_args
+        return new
+
+    def sample(self, sample_shape=torch.Size()):
+        # This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
+        # - This vectorizes the for loop and also works for heterogeneous eta.
+        # - Same algorithm generalizes to n=1.
+        # - The procedure is simplified since we are sampling the cholesky factor of
+        #   the correlation matrix instead of the correlation matrix itself. As such,
+        #   we only need to generate `w`.
+        y = self._beta.sample(sample_shape).unsqueeze(-1)
+        u_normal = torch.randn(
+            self._extended_shape(sample_shape), dtype=y.dtype, device=y.device
+        ).tril(-1)
+        u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
+        # Replace NaNs in first row
+        u_hypersphere[..., 0, :].fill_(0.0)
+        w = torch.sqrt(y) * u_hypersphere
+        # Fill diagonal elements; clamp for numerical stability
+        eps = torch.finfo(w.dtype).tiny
+        diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
+        w += torch.diag_embed(diag_elems)
+        return w
+
+    def log_prob(self, value):
+        # See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
+        # The probability of a correlation matrix is proportional to
+        #   determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
+        # Additionally, the Jacobian of the transformation from Cholesky factor to
+        # correlation matrix is:
+        #   prod(L_ii ^ (D - i))
+        # So the probability of a Cholesky factor is proportional to
+        #   prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
+        # with order_i = 2 * concentration - 2 + D - i
+        if self._validate_args:
+            self._validate_sample(value)
+        diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
+        order = torch.arange(2, self.dim + 1, device=self.concentration.device)
+        order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
+        unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
+        # Compute normalization constant (page 1999 of [1])
+        dm1 = self.dim - 1
+        alpha = self.concentration + 0.5 * dm1
+        denominator = torch.lgamma(alpha) * dm1
+        numerator = torch.mvlgamma(alpha - 0.5, dm1)
+        # pi_constant in [1] is D * (D - 1) / 4 * log(pi)
+        # pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
+        # hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
+        pi_constant = 0.5 * dm1 * math.log(math.pi)
+        normalize_term = pi_constant + numerator - denominator
+        return unnormalized_log_pdf - normalize_term
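
The onion-method sampler returns a lower-triangular factor `L` whose rows have unit norm, so `L @ L.T` is a valid correlation matrix with ones on the diagonal. A brief sketch, assuming `torch.distributions.LKJCholesky` resolves to the class above and that `Tensor.mT` is available in the installed PyTorch:

    # Hedged sketch, not part of the committed file.
    import torch
    from torch.distributions import LKJCholesky

    torch.manual_seed(0)
    d = LKJCholesky(dim=3, concentration=2.0)
    L = d.sample()               # lower-triangular Cholesky factor, shape (3, 3)
    corr = L @ L.mT              # corresponding correlation matrix
    print(torch.diagonal(corr))  # ones on the diagonal, up to float error
    print(d.log_prob(L))         # density of the Cholesky factor itself
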
infer_4_30_0/lib/python3.10/site-packages/torch/distributions/log_normal.py ADDED
@@ -0,0 +1,64 @@
+# mypy: allow-untyped-defs
+from torch.distributions import constraints
+from torch.distributions.normal import Normal
+from torch.distributions.transformed_distribution import TransformedDistribution
+from torch.distributions.transforms import ExpTransform
+
+
+__all__ = ["LogNormal"]
+
+
+class LogNormal(TransformedDistribution):
+    r"""
+    Creates a log-normal distribution parameterized by
+    :attr:`loc` and :attr:`scale` where::
+
+        X ~ Normal(loc, scale)
+        Y = exp(X) ~ LogNormal(loc, scale)
+
+    Example::
+
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # log-normal distributed with mean=0 and stddev=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of log of distribution
+        scale (float or Tensor): standard deviation of log of the distribution
+    """
+    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, loc, scale, validate_args=None):
+        base_dist = Normal(loc, scale, validate_args=validate_args)
+        super().__init__(base_dist, ExpTransform(), validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(LogNormal, _instance)
+        return super().expand(batch_shape, _instance=new)
+
+    @property
+    def loc(self):
+        return self.base_dist.loc
+
+    @property
+    def scale(self):
+        return self.base_dist.scale
+
+    @property
+    def mean(self):
+        return (self.loc + self.scale.pow(2) / 2).exp()
+
+    @property
+    def mode(self):
+        return (self.loc - self.scale.square()).exp()
+
+    @property
+    def variance(self):
+        scale_sq = self.scale.pow(2)
+        return scale_sq.expm1() * (2 * self.loc + scale_sq).exp()
+
+    def entropy(self):
+        return self.base_dist.entropy() + self.loc
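
Because LogNormal is simply `exp(Normal(loc, scale))`, the closed-form `mean` and `variance` properties above can be sanity-checked against samples pushed through the `ExpTransform`. A small sketch, assuming `torch.distributions.LogNormal` resolves to this class:

    # Hedged sketch, not part of the committed file.
    import torch
    from torch.distributions import LogNormal

    torch.manual_seed(0)
    m = LogNormal(torch.tensor([0.0]), torch.tensor([0.5]))
    x = m.rsample((200_000,))
    print(x.mean(), m.mean)    # both close to exp(loc + scale**2 / 2)
    print(x.var(), m.variance) # (exp(scale**2) - 1) * exp(2*loc + scale**2)
    print(m.entropy())         # base Normal entropy plus loc (the mean of log X)
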