ZTWHHH committed
Commit 72ef79c (verified) · Parent(s): d1d3e83

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.

Files changed (50)
  1. .gitattributes +3 -0
  2. deepseek/lib/python3.10/site-packages/xformers/_C.so +3 -0
  3. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc +0 -0
  5. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc +0 -0
  6. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc +0 -0
  9. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc +0 -0
  10. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc +0 -0
  11. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc +0 -0
  12. evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc +0 -0
  13. evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__init__.py +55 -0
  14. evalkit_tf437/lib/python3.10/site-packages/torch/fx/config.py +6 -0
  15. evalkit_tf437/lib/python3.10/site-packages/torch/fx/graph_module.py +867 -0
  16. evalkit_tf437/lib/python3.10/site-packages/torch/fx/immutable_collections.py +54 -0
  17. evalkit_tf437/lib/python3.10/site-packages/torch/fx/operator_schemas.py +440 -0
  18. evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc +3 -0
  19. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c +27 -0
  20. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c +16 -0
  21. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c +19 -0
  22. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c +15 -0
  23. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c +20 -0
  24. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c +20 -0
  25. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c +22 -0
  26. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c +24 -0
  27. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c +26 -0
  28. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c +25 -0
  29. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c +30 -0
  30. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c +26 -0
  31. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_spr.c +26 -0
  32. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c +20 -0
  33. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c +20 -0
  34. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c +22 -0
  35. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c +22 -0
  36. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c +13 -0
  37. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c +19 -0
  38. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c +11 -0
  39. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c +21 -0
  40. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c +32 -0
  41. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c +20 -0
  42. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c +20 -0
  43. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c +20 -0
  44. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c +20 -0
  45. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c +20 -0
  46. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c +20 -0
  47. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c +13 -0
  48. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c +13 -0
  49. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c +14 -0
  50. falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c +16 -0
.gitattributes CHANGED
@@ -568,3 +568,6 @@ falcon/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expans
  falcon/lib/python3.10/site-packages/PIL/_imagingmath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  falcon/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  falcon/lib/python3.10/site-packages/psutil/_psutil_linux.abi3.so filter=lfs diff=lfs merge=lfs -text
+ evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ deepseek/lib/python3.10/site-packages/xformers/_C.so filter=lfs diff=lfs merge=lfs -text
+ falcon/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
deepseek/lib/python3.10/site-packages/xformers/_C.so ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f09f83477c28599853978c2731663e517f27fba277141ff74d6acc1ea5c60cb
size 50942528
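
The three version/oid/size lines above are the entire on-disk content of a Git LFS pointer file; the ~50 MB shared object itself lives in LFS storage, keyed by that sha256. As a minimal illustration (a hypothetical helper, not part of this commit), such a pointer can be parsed like this:

# Hypothetical helper (illustration only): parse a Git LFS pointer file of the
# form shown above -- one "key value" pair per line.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    algo, _, digest = fields["oid"].partition(":")
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:9f09f83477c28599853978c2731663e517f27fba277141ff74d6acc1ea5c60cb
size 50942528
"""
print(parse_lfs_pointer(pointer))  # {'algo': 'sha256', 'digest': '9f09f834...', 'size': 50942528}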
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.61 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc ADDED
Binary file (68.2 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc ADDED
Binary file (15.8 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc ADDED
Binary file (5.57 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc ADDED
Binary file (3.56 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc ADDED
Binary file (2.63 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc ADDED
Binary file (480 Bytes)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (72 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc ADDED
Binary file (29.4 kB)

evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc ADDED
Binary file (39.4 kB)
evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__init__.py ADDED
@@ -0,0 +1,55 @@
import threading

import torch._C._lazy
from torch.utils._pytree import tree_flatten, tree_unflatten

from .closure import add_step_closure, run_step_closures


def mark_step(device: str = "", wait=False):
    """Triggers a mark step, which amounts to
    - collecting a group of 'live' lazy tensors to index into the compilation cache
      (lowering/compiling their IR graphs if not cached)
    - kicking off execution of the compiled function
    - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
    """
    # TODO(whc) expand this to include backend hooks and align with XLA backend needs
    torch._C._lazy._mark_step(device, [], wait=wait)

    run_step_closures()


def wait_device_ops(devices=None):
    """Waits for all the async operations on the given devices to complete.

    Args:
        devices (string..., optional): The devices whose async ops need to be waited
            for. If empty, all the local devices will be waited for.
    """
    if devices is None:
        devices = []
    torch._C._lazy._wait_device_ops(devices=devices)


def sync_multi(tensors, devices):
    """
    Sync the list of lazy tensors so their IR graphs get lowered for the active
    backend and the compiled computation graphs get cached.
    """
    torch._C._lazy._sync_multi(tensors, devices)


def get_tensor_id(tensor):
    """Return a unique id of the lazy tensor maintained by LTC."""
    return torch._C._lazy._get_tensor_id(tensor)


def to_cpu(tensors, devices=None):
    devices = devices or ["lazy"]

    flattened, spec = tree_flatten(tensors)
    sync_multi(flattened, devices)
    return tree_unflatten([t.to("cpu") for t in flattened], spec)


def save(tensors, *args, **kwargs):
    torch.save(to_cpu(tensors), *args, **kwargs)
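
As a usage sketch (not part of the commit), assuming this build ships the TorchScript reference backend for Lazy Tensor Core (torch._lazy.ts_backend):

import torch
import torch._lazy
import torch._lazy.ts_backend

torch._lazy.ts_backend.init()         # register the "lazy" device (assumed available)

x = torch.randn(4, 4, device="lazy")  # ops are recorded into an IR graph, not run eagerly
y = (x @ x).relu()
torch._lazy.mark_step()               # lower/compile the graph and kick off execution
print(torch._lazy.to_cpu([y])[0])     # sync and materialize the result on CPU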
evalkit_tf437/lib/python3.10/site-packages/torch/fx/config.py ADDED
@@ -0,0 +1,6 @@
# Whether to disable showing progress on compilation passes.
# Needs to be a separate config; otherwise we will get a circular import if the dynamo config is imported here.
disable_progress = True

# If True, this also shows the node names in each pass. For small models this is great, but for larger models it's quite noisy.
verbose_progress = False
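
Since these are plain module-level flags, a minimal sketch of how a caller would flip them before running whichever compilation passes consult them:

import torch.fx.config

torch.fx.config.disable_progress = False  # show progress on compilation passes
torch.fx.config.verbose_progress = True   # also print node names (noisy on large models)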
evalkit_tf437/lib/python3.10/site-packages/torch/fx/graph_module.py ADDED
@@ -0,0 +1,867 @@
import copy
import itertools
import linecache
import os
import sys
import traceback
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Type, Union

import torch
import torch.nn as nn
import torch.overrides
from torch.nn.modules.module import _addindent
from torch.package import Importer, PackageExporter, PackageImporter, sys_importer

from ._compatibility import compatibility
from .graph import _custom_builtins, _is_from_torch, _PyTreeCodeGen, Graph, PythonCode

__all__ = [
    "reduce_graph_module",
    "reduce_package_graph_module",
    "reduce_deploy_graph_module",
    "GraphModule",
]

_USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes"

# Normal exec loses the source code, however we can work with
# the linecache module to recover it.
# Using _exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
class _EvalCacheLoader:
    def __init__(self):
        self.eval_cache = {}
        self.next_id = 0

    def cache(self, src: str, globals: Dict[str, Any], co_fields=None):
        """Store the source in a private cache, and add a lazy entry in linecache
        that allows the source to be retrieved by 'filename'.

        Args:
            src (str): The module source to cache
            globals (dict): The module globals

        Returns:
            str: The cache key (and dummy filename) generated for src.
        """

        key = self._get_key()
        if co_fields:
            key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}"
        self.eval_cache[key] = src

        # Don't mutate globals so that this loader is only used
        # to populate linecache, and doesn't interact with other modules
        # that might check `__loader__`
        globals_copy = globals.copy()
        globals_copy["__file__"] = key
        globals_copy["__name__"] = key
        globals_copy["__loader__"] = self
        linecache.lazycache(key, globals_copy)

        return key

    # Part of the loader protocol (PEP 302)
    # linecache will use this method when trying to find source code
    def get_source(self, module_name) -> Optional[str]:
        if module_name in self.eval_cache:
            return self.eval_cache[module_name]
        return None

    def _get_key(self):
        key = f"<eval_with_key>.{self.next_id}"
        self.next_id += 1
        return key


_loader = _EvalCacheLoader()


def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None):
    key = _loader.cache(src, globals, co_fields)
    exec(compile(src, key, "exec"), globals)


def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None):
    return _method_from_src(
        method_name="forward", src=src, globals=globals, co_fields=co_fields
    )


def _method_from_src(
    method_name: str, src: str, globals: Dict[str, Any], co_fields=None
) -> Callable:
    # avoid mutating the passed in dict
    globals_copy = globals.copy()
    _exec_with_source(src, globals_copy, co_fields)
    fn = globals_copy[method_name]
    del globals_copy[method_name]
    return fn


def _format_import_statement(name: str, obj: Any, importer: Importer) -> str:
    if name in _custom_builtins:
        return _custom_builtins[name].import_str
    if _is_from_torch(name):
        return "import torch"
    module_name, attr_name = importer.get_name(obj)
    return f"from {module_name} import {attr_name} as {name}"


def _format_import_block(globals: Dict[str, Any], importer: Importer):
    import_strs: Set[str] = set()
    for name, obj in globals.items():
        import_strs.add(_format_import_statement(name, obj, importer))
    # Sort the imports so we have a stable import block that allows us to
    # hash the graph module and get a consistent key for use in a cache.
    return "\n".join(sorted(import_strs))


@compatibility(is_backward_compatible=True)
def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:
    # BC: attribute name was changed from `code` to `_code` to facilitate
    # making `code` into a property and adding a docstring to it
    fn_src = body.get("_code") or body["code"]
    forward = _forward_from_src(import_block + fn_src, {})
    return _deserialize_graph_module(forward, body)


@compatibility(is_backward_compatible=True)
def reduce_package_graph_module(
    importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str
) -> torch.nn.Module:
    forward = importer.import_module(generated_module_name).forward
    return _deserialize_graph_module(forward, body)


@compatibility(is_backward_compatible=True)
def reduce_deploy_graph_module(
    importer: PackageImporter, body: Dict[Any, Any], import_block: str
) -> torch.nn.Module:
    ns = {}
    ns["__builtins__"] = importer.patched_builtins
    fn_src = body.get("_code")
    assert fn_src is not None
    forward = _forward_from_src(import_block + fn_src, ns)
    return _deserialize_graph_module(forward, body)


# We create a dummy class here because symbolic_trace pulls the forward()
# function off of the class, rather than the instance. This class is used
# in _deserialize_graph_module() below.
class _CodeOnlyModule(torch.nn.Module):
    def __init__(self, body):
        super().__init__()
        self.__dict__ = body


def _deserialize_graph_module(forward, body: Dict[Any, Any], graph_module_cls=None) -> torch.nn.Module:
    """
    Deserialize a GraphModule given the dictionary of the original module,
    using the code to reconstruct the graph. We delete the actual graph before
    saving the dictionary so that changes to the in-memory graph format do not
    get serialized.
    """

    # Try to retrieve the forward source in a backward-compatible way
    _CodeOnlyModule.forward = forward

    tracer_cls = body.get("_tracer_cls")
    if tracer_cls is None:
        from ._symbolic_trace import Tracer

        tracer_cls = Tracer

    graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule")

    # This is a workaround for a mypy linter issue related to
    # passing base class as an argument - https://github.com/python/mypy/issues/5865.
    cls_tracer: Any = tracer_cls

    class KeepModules(cls_tracer):
        # we shouldn't trace into any of the submodules,
        # because they were not traced in the original GraphModule
        def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
            return True

    com = _CodeOnlyModule(body)

    tracer_extras = body.get("_tracer_extras", {})
    graph = KeepModules().trace(com, **tracer_extras)

    # Manually set Tracer class on the reconstructed Graph, to avoid
    # referencing the private local subclass KeepModules.
    graph._tracer_cls = tracer_cls
    if graph_module_cls is None:
        graph_module_cls = GraphModule
    gm = graph_module_cls(com, graph, class_name=graphmodule_cls_name)

    # The GraphModule constructor only retains attributes referenced by the graph.
    # In this case, our goal is to return a GraphModule as close to identical as
    # possible to the one put into the package. If any additional attributes were
    # present in body, we should keep them.
    for k, v in body.items():
        if not hasattr(gm, k):
            setattr(gm, k, v)
    return gm


# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
    *prefix, field = target.split(".")
    for item in prefix:
        f = getattr(from_module, item)
        t = getattr(to_module, item, None)
        if f is t:
            # we have already installed one of its parents
            # (e.g. target = root.linear.weight, but we have already installed root.linear)
            # once we install a parent, we no longer need to copy the children
            # since all the needed properties will already be present
            return

        if t is None:
            t = torch.nn.Module()
            setattr(to_module, item, t)
        from_module, to_module = f, t

    orig = getattr(from_module, field)
    # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
    # So, we register it as a named buffer in the target module.
    if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
        to_module.register_buffer(field, orig)
    else:
        setattr(to_module, field, orig)


# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
    *prefix, field = target.split(".")
    for item in prefix:
        t = getattr(to_module, item, None)

        if t is None:
            t = torch.nn.Module()
            setattr(to_module, item, t)
        to_module = t

    # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
    # So, we register it as a named buffer in the target module.
    if isinstance(from_obj, torch.Tensor) and not isinstance(
        from_obj, torch.nn.Parameter
    ):
        to_module.register_buffer(field, from_obj)
    else:
        setattr(to_module, field, from_obj)


class _WrappedCall:
    def __init__(self, cls, cls_call):
        self.cls = cls
        self.cls_call = cls_call

    # Previously, if an error occurred when valid
    # symbolically-traced code was run with an invalid input, the
    # user would see the source of the error as coming from
    # `File "<eval_with_key_N">`, where N is some number. We use
    # this function to generate a more informative error message. We
    # return the traceback itself, a message explaining that the
    # error occurred in a traced Module's generated forward
    # function, and five lines of context surrounding the faulty
    # line
    @staticmethod
    def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
        # auxiliary variables (for readability)
        err_lineno = frame_summary.lineno
        assert err_lineno is not None
        line = frame_summary.line
        assert line is not None
        err_line_len = len(line)
        all_src_lines = linecache.getlines(frame_summary.filename)

        # constituent substrings of the error message
        tb_repr = traceback.format_exc()
        custom_msg = (
            "Call using an FX-traced Module, "
            f"line {err_lineno} of the traced Module's "
            "generated forward function:"
        )
        before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
        marker = "~" * err_line_len + "~~~ <--- HERE"
        err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])

        # joined message
        return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])

    def __call__(self, obj, *args, **kwargs):
        try:
            if self.cls_call is not None:
                return self.cls_call(obj, *args, **kwargs)
            else:
                return super(self.cls, obj).__call__(*args, **kwargs)  # type: ignore[misc]
        except Exception as e:
            assert e.__traceback__
            topmost_framesummary: traceback.FrameSummary = (
                traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1]
            )  # type: ignore[arg-type]
            if "eval_with_key" in topmost_framesummary.filename:
                print(
                    _WrappedCall._generate_error_message(topmost_framesummary),
                    file=sys.stderr,
                )
                raise e.with_traceback(None)  # noqa: TRY200
            else:
                raise e


@compatibility(is_backward_compatible=True)
class GraphModule(torch.nn.Module):
    """
    GraphModule is an nn.Module generated from an fx.Graph. GraphModule has a
    ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated
    from that ``graph``.

    .. warning::

        When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
        regenerated. However, if you edit the contents of the ``graph`` without reassigning
        the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
        code.
    """

    def __new__(cls: "Type[GraphModule]", *args, **kwargs):
        # each instance of a graph module needs its own forward method
        # so create a new singleton class for each instance.
        # it is a subclass of the user-defined class, the only difference
        # is an extra layer to install the forward method

        # address issue described at https://github.com/pytorch/pytorch/issues/63883
        # in other words, traverse class hierarchy to fix the redundant class definition problem
        for t in cls.__mro__:
            c = t.__qualname__.split(".")[-1]
            if c != "GraphModuleImpl":
                cls = t
                break

        class GraphModuleImpl(cls):  # type: ignore[misc, valid-type]
            pass

        return super().__new__(GraphModuleImpl)

    @compatibility(is_backward_compatible=True)
    def __init__(
        self,
        root: Union[torch.nn.Module, Dict[str, Any]],
        graph: Graph,
        class_name: str = "GraphModule",
    ):
        """
        Construct a GraphModule.

        Args:

            root (Union[torch.nn.Module, Dict[str, Any]]):
                ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
                In the case that ``root`` is a Module, any references to Module-based objects (via qualified
                name) in the Graph's Nodes' ``target`` field will be copied over from the respective place
                within ``root``'s Module hierarchy into the GraphModule's module hierarchy.
                In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be
                looked up directly in the dict's keys. The object mapped to by the Dict will be copied
                over into the appropriate place within the GraphModule's module hierarchy.

            graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation

            class_name (str): ``name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all
                error messages will report as originating from ``GraphModule``. It may be helpful to set this
                to ``root``'s original name or a name that makes sense within the context of your transform.
        """
        super().__init__()
        self.__class__.__name__ = class_name
        if isinstance(root, torch.nn.Module):
            if hasattr(root, "training"):
                self.training = root.training

            # When we pickle/unpickle graph module, we don't want to drop any module or attributes.
            if isinstance(root, _CodeOnlyModule):
                for k, _ in root.named_children():
                    _copy_attr(root, self, k)

                for k, _ in root.named_buffers():
                    _copy_attr(root, self, k)

                for k, _ in root.named_parameters():
                    _copy_attr(root, self, k)

            for node in graph.nodes:
                if node.op in ["get_attr", "call_module"]:
                    assert isinstance(node.target, str)
                    _copy_attr(root, self, node.target)
        elif isinstance(root, dict):
            targets_to_copy = []
            for node in graph.nodes:
                if node.op in ["get_attr", "call_module"]:
                    assert isinstance(node.target, str)
                    if node.target not in root:
                        raise RuntimeError(
                            "Node "
                            + str(node)
                            + " referenced target "
                            + node.target
                            + " but that target was not provided in ``root``!"
                        )
                    targets_to_copy.append(node.target)
            # Sort targets in ascending order of the # of atoms.
            # This will ensure that less deeply nested attributes are assigned
            # before more deeply nested attributes. For example, foo.bar
            # will be assigned before foo.bar.baz. Otherwise, we might assign
            # the user-provided ``foo.bar`` and wipe out the previously-assigned
            # ``foo.bar.baz``
            targets_to_copy.sort(key=lambda t: t.count("."))
            for target_to_copy in targets_to_copy:
                _assign_attr(root[target_to_copy], self, target_to_copy)
        else:
            raise RuntimeError("Unsupported type " + str(root) + " passed for root!")

        self.graph = graph

        # Store the Tracer class responsible for creating a Graph separately as part of the
        # GraphModule state, except when the Tracer is defined in a local namespace.
        # Locally defined Tracers are not pickleable. This is needed because torch.package will
        # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
        # to re-create the Graph during deserialization.
        self._tracer_cls = None
        if (
            self.graph._tracer_cls
            and "<locals>" not in self.graph._tracer_cls.__qualname__
        ):
            self._tracer_cls = self.graph._tracer_cls

        self._tracer_extras = {}
        if self.graph._tracer_extras:
            self._tracer_extras = self.graph._tracer_extras

        # Dictionary to store metadata
        self.meta: Dict[str, Any] = {}

    # TorchScript breaks trying to compile the graph setter because of the
    # continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
    #
    # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
    __jit_unused_properties__ = ["graph"]

    @property
    def graph(self) -> Graph:
        """
        Return the ``Graph`` underlying this ``GraphModule``
        """
        return self._graph

    @graph.setter
    def graph(self, g: Graph) -> None:
        """
        Set the underlying ``Graph`` for this ``GraphModule``. This will internally
        recompile the ``GraphModule`` so that the generated ``forward()`` function
        corresponds to ``g``
        """
        assert isinstance(g, Graph), f"Expected a Graph instance, but got {type(g)}"
        self._graph = g
        g.owning_module = self
        self.recompile()

    @compatibility(is_backward_compatible=False)
    def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"):
        """Dumps out module to ``folder`` with ``module_name`` so that it can be
        imported with ``from <folder> import <module_name>``

        Args:

            folder (Union[str, os.PathLike]): The folder to write the code out to

            module_name (str): Top-level name to use for the ``Module`` while
                writing out the code
        """
        folder = Path(folder)
        Path(folder).mkdir(exist_ok=True)
        torch.save(self.state_dict(), folder / "state_dict.pt")
        tab = " " * 4
        custom_builtins = "\n".join([v.import_str for v in _custom_builtins.values()])
        model_str = f"""
import torch
{custom_builtins}

from torch.nn import *
class {module_name}(torch.nn.Module):
    def __init__(self):
        super().__init__()
"""

        def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
            safe_reprs = [
                nn.Linear,
                nn.Conv1d,
                nn.Conv2d,
                nn.Conv3d,
                nn.BatchNorm1d,
                nn.BatchNorm2d,
                nn.BatchNorm3d,
            ]
            if type(module) in safe_reprs:
                return f"{module.__repr__()}"
            else:
                return None

        blobified_modules = []
        for module_name, module in self.named_children():
            module_str = _gen_model_repr(module_name, module)
            if module_str is None:
                module_file = folder / f"{module_name}.pt"
                torch.save(module, module_file)
                blobified_modules.append(module_name)
                module_repr = module.__repr__().replace("\r", " ").replace("\n", " ")
                module_str = f"torch.load(r'{module_file}') # {module_repr}"
            model_str += f"{tab*2}self.{module_name} = {module_str}\n"

        for buffer_name, buffer in self._buffers.items():
            if buffer is None:
                continue
            model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"

        for param_name, param in self._parameters.items():
            if param is None:
                continue
            model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"

        model_str += (
            f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
        )
        model_str += f"{_addindent(self.code, 4)}\n"

        module_file = folder / "module.py"
        module_file.write_text(model_str)

        init_file = folder / "__init__.py"
        init_file.write_text("from .module import *")

        if len(blobified_modules) > 0:
            warnings.warn(
                "Was not able to save the following children modules as reprs - "
                f"saved as pickled files instead: {blobified_modules}"
            )

    @compatibility(is_backward_compatible=True)
    def add_submodule(self, target: str, m: torch.nn.Module) -> bool:
        """
        Adds the given submodule to ``self``.

        This installs empty Modules where none exist yet if they are
        subpaths of ``target``.

        Args:
            target: The fully-qualified string name of the new submodule
                (See example in ``nn.Module.get_submodule`` for how to
                specify a fully-qualified string.)
            m: The submodule itself; the actual object we want to
                install in the current Module

        Returns:
            bool: Whether or not the submodule could be inserted. For
                this method to return True, each object in the chain
                denoted by ``target`` must either a) not exist yet,
                or b) reference an ``nn.Module`` (not a parameter or
                other attribute)
        """
        *prefix, field = target.split(".")
        mod: torch.nn.Module = self

        for item in prefix:

            submod = getattr(mod, item, None)

            if submod is None:
                submod = torch.nn.Module()
                setattr(mod, item, submod)

            if not isinstance(submod, torch.nn.Module):
                return False

            mod = submod

        mod.add_module(field, m)
        return True

    @compatibility(is_backward_compatible=True)
    def delete_submodule(self, target: str) -> bool:
        """
        Deletes the given submodule from ``self``.

        The module will not be deleted if ``target`` is not a valid
        target.

        Args:
            target: The fully-qualified string name of the new submodule
                (See example in ``nn.Module.get_submodule`` for how to
                specify a fully-qualified string.)

        Returns:
            bool: Whether or not the target string referenced a
                submodule we want to delete. A return value of ``False``
                means that the ``target`` was not a valid reference to
                a submodule.
        """
        atoms = target.split(".")
        path, target_submod = atoms[:-1], atoms[-1]
        mod: torch.nn.Module = self

        # Get the parent module
        for item in path:

            if not hasattr(mod, item):
                return False

            mod = getattr(mod, item)

            if not isinstance(mod, torch.nn.Module):
                return False

        if not hasattr(mod, target_submod):
            return False

        if not isinstance(getattr(mod, target_submod), torch.nn.Module):
            return False

        delattr(mod, target_submod)
        return True

    @compatibility(is_backward_compatible=True)
    def delete_all_unused_submodules(self) -> None:
        """
        Deletes all unused submodules from ``self``.

        A Module is considered "used" if any one of the following is
        true:
        1. It has children that are used
        2. Its forward is called directly via a ``call_module`` node
        3. It has a non-Module attribute that is used from a
           ``get_attr`` node

        This method can be called to clean up an ``nn.Module`` without
        manually calling ``delete_submodule`` on each unused submodule.
        """
        used: List[str] = []

        for node in self.graph.nodes:

            if node.op == "call_module" or node.op == "get_attr":

                # A list of strings representing the different parts
                # of the path. For example, `foo.bar.baz` gives us
                # ["foo", "bar", "baz"]
                fullpath = node.target.split(".")

                # If we're looking at multiple parts of a path, join
                # them with a dot. Otherwise, return that single
                # element without doing anything to it.
                def join_fn(x: str, y: str) -> str:
                    return ".".join([x, y] if y else [x])

                # Progressively collect all the names of intermediate
                # modules. For example, if we have the target
                # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and
                # `foo.bar.baz` to the list.
                for path in itertools.accumulate(fullpath, join_fn):
                    used.append(path)

                # For a `call_module` node, also register all recursive submodules
                # as used
                if node.op == "call_module":
                    try:
                        submod = self.get_submodule(node.target)

                        for submod_name, _ in submod.named_modules():
                            if submod_name != "":
                                used.append(".".join([node.target, submod_name]))
                    except AttributeError:
                        # Node referenced nonexistent submodule, don't need to
                        # worry about GCing anything
                        pass

        to_delete = [name for name, _ in self.named_modules() if name not in used]

        for name in to_delete:
            self.delete_submodule(name)

    @property
    def code(self) -> str:
        """
        Return the Python code generated from the ``Graph`` underlying this
        ``GraphModule``.
        """
        if not hasattr(self, "_code"):
            raise RuntimeError(
                "Code has not been generated! Please report a bug to PyTorch"
            )
        return self._code

    @compatibility(is_backward_compatible=True)
    def recompile(self) -> PythonCode:
        """
        Recompile this GraphModule from its ``graph`` attribute. This should be
        called after editing the contained ``graph``, otherwise the generated
        code of this ``GraphModule`` will be out of date.
        """
        if isinstance(self._graph._codegen, _PyTreeCodeGen):
            self._in_spec = self._graph._codegen.pytree_info.in_spec
            self._out_spec = self._graph._codegen.pytree_info.out_spec
        python_code = self._graph.python_code(root_module="self")
        self._code = python_code.src
        self._lineno_map = python_code._lineno_map

        cls = type(self)
        co_fields = self._graph._co_fields if hasattr(self._graph, "_co_fields") else {}
        cls.forward = _forward_from_src(self._code, python_code.globals, co_fields)

        # Determine whether this class explicitly defines a __call__ implementation
        # to wrap. If it does, save it in order to have wrapped_call invoke it.
        # If it does not, wrapped_call can use a dynamic call to super() instead.
        # In most cases, super().__call__ should be torch.nn.Module.__call__.
        # We do not want to hold a reference to Module.__call__ here; doing so will
        # bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
        cls_call = cls.__call__ if "__call__" in vars(cls) else None

        if "_wrapped_call" not in vars(cls):
            cls._wrapped_call = _WrappedCall(cls, cls_call)  # type: ignore[attr-defined]

        def call_wrapped(self, *args, **kwargs):
            return self._wrapped_call(self, *args, **kwargs)

        cls.__call__ = call_wrapped  # type: ignore[method-assign]

        return python_code

    # Passing Tracer as argument allows subclasses extending fx.GraphModule
    # to define their own Tracer (extending fx.Tracer).
    def __reduce_deploy__(self, importer: Importer):
        dict_without_graph = self.__dict__.copy()
        dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
        del dict_without_graph["_graph"]

        python_code = self.recompile()
        import_block = _format_import_block(python_code.globals, importer)
        return (reduce_deploy_graph_module, (dict_without_graph, import_block))

    def __reduce_package__(self, exporter: PackageExporter):
        dict_without_graph = self.__dict__.copy()
        dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
        del dict_without_graph["_graph"]

        generated_module_name = f"fx-generated._{exporter.get_unique_id()}"
        python_code = self.recompile()
        import_block = _format_import_block(python_code.globals, exporter.importer)
        module_code = import_block + self.code
        exporter.save_source_string(generated_module_name, module_code)
        return (
            reduce_package_graph_module,
            (dict_without_graph, generated_module_name),
        )

    def __reduce__(self):
        """
        Serialization of GraphModule. We serialize only the generated code, not
        the underlying ``Graph``. This is because ``Graph`` does not have on-disk
        backward-compatibility guarantees, whereas Python source code does.
        On the deserialization side, we symbolically trace through the generated
        code to regenerate the underlying ``Graph``
        """
        dict_without_graph = self.__dict__.copy()
        python_code = self.recompile()
        import_block = _format_import_block(python_code.globals, sys_importer)
        del dict_without_graph["_graph"]
        return (reduce_graph_module, (dict_without_graph, import_block))

    def _deepcopy_init(self):
        return GraphModule.__init__

    # because __reduce__ is defined for serialization,
    # we need to define deepcopy otherwise it will call __reduce__
    # and cause symbolic tracing to occur every time we try to copy the object
    def __deepcopy__(self, memo):
        res = type(self).__new__(type(self))
        memo[id(self)] = res
        fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo))
        self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["_graph"])
        # hooks are lost during `GraphModule.__init__`, so we need to copy them
        # over explicitly. Note that right now we are only copying state_dict-related
        # hooks; to reduce bc-related issues, we can copy forward/backward-related
        # hooks in the future as well if needed
        extra_preserved_attrs = [
            "_state_dict_hooks",
            "_load_state_dict_pre_hooks",
            "_load_state_dict_post_hooks",
        ]
        for attr in extra_preserved_attrs:
            if attr in self.__dict__:
                setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo))
        res.meta = copy.deepcopy(getattr(self, "meta", {}), memo)
        if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
            for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
                setattr(res, attr_name, attr)
        return res

    def __copy__(self):
        res = GraphModule(self, self.graph)
        res.meta = getattr(self, "meta", {})
        return res

    @compatibility(is_backward_compatible=False)
    def print_readable(self, print_output=True):
        """
        Return the Python code generated for the current GraphModule and its children GraphModules
        """
        verbose_python_code = self._graph.python_code(root_module="self", verbose=True)
        module_code = verbose_python_code.src
        module_code = module_code.lstrip("\n")
        module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code
        module_code = _addindent(module_code, 4)

        submodule_code_list = [""]
        for submodule in self.children():
            if isinstance(submodule, GraphModule):
                submodule_code_list.append(submodule.print_readable(print_output=False))
        submodule_code = "\n".join(submodule_code_list)
        submodule_code = _addindent(submodule_code, 4)

        output = module_code + submodule_code
        if print_output:
            print(module_code + submodule_code)
        return output

    def __str__(self) -> str:
        orig_str = super().__str__()
        print_readable_reminder = (
            "# To see more debug info, please use `graph_module.print_readable()`"
        )
        return "\n".join([orig_str, self._code, print_readable_reminder])

    def _replicate_for_data_parallel(self):
        new_gm = self.__copy__()
        new_gm._is_replica = True
        return new_gm


# workarounds for issues in __torch_function__

# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
#     tensors = args[0]
#     for t in tensors:
#         if isinstance(t, Proxy):
#             return t.__torch_function__(patched_cat, (), args, kwargs)
#     return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
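
As a usage sketch (not part of the commit) of the edit-then-``recompile()`` workflow that the ``GraphModule`` docstring above describes, assuming a standard torch.fx install:

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0

# symbolic_trace produces a GraphModule whose `code`/`forward` are generated
# from its `graph`.
gm = torch.fx.symbolic_trace(M())
print(gm.code)

# Mutate the graph in place: swap relu for gelu on matching call_function nodes.
for node in gm.graph.nodes:
    if node.op == "call_function" and node.target is torch.relu:
        node.target = torch.nn.functional.gelu

# The graph was edited without reassigning `gm.graph`, so recompile() is
# required before the generated code reflects the change.
gm.recompile()
x = torch.ones(2)
torch.testing.assert_close(gm(x), torch.nn.functional.gelu(x) + 1.0)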
evalkit_tf437/lib/python3.10/site-packages/torch/fx/immutable_collections.py ADDED
@@ -0,0 +1,54 @@
from typing import Any, Dict, Iterable, List, Tuple

from ._compatibility import compatibility
from torch.utils._pytree import Context, register_pytree_node

__all__ = ["immutable_list", "immutable_dict"]

_help_mutation = """\
If you are attempting to modify the kwargs or args of a torch.fx.Node object,
instead create a new copy of it and assign the copy to the node:
    new_args = ...  # copy and mutate args
    node.args = new_args
"""

def _no_mutation(self, *args, **kwargs):
    raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation. {_help_mutation}")

def _create_immutable_container(base, mutable_functions):
    container = type('immutable_' + base.__name__, (base,), {})
    for attr in mutable_functions:
        setattr(container, attr, _no_mutation)
    return container

immutable_list = _create_immutable_container(list,
                                             ['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append',
                                              'clear', 'extend', 'insert', 'pop', 'remove'])
immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),))
immutable_list.__hash__ = lambda self: hash(tuple(self))

compatibility(is_backward_compatible=True)(immutable_list)

immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update'])
immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),))
immutable_dict.__hash__ = lambda self: hash(tuple(self.items()))
compatibility(is_backward_compatible=True)(immutable_dict)


# Register immutable collections for PyTree operations

def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
    return list(d.values()), list(d.keys())

def _immutable_dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]:
    return immutable_dict(dict(zip(context, values)))

def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
    return d, None

def _immutable_list_unflatten(values: Iterable[Any], context: Context) -> List[Any]:
    return immutable_list(values)


register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten)
register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten)
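
A small sketch (not part of the commit) of the behavior these containers guarantee: every mutating method raises, while hashing and the copy-then-replace pattern from the _help_mutation message work as expected:

from torch.fx.immutable_collections import immutable_dict, immutable_list

args = immutable_list([1, 2, 3])
try:
    args.append(4)                    # mutating methods were replaced by _no_mutation
except NotImplementedError as err:
    print(err)

print(hash(args))                     # hashable, unlike a plain list
new_args = immutable_list(list(args) + [4])  # supported copy-and-replace pattern
print(new_args)

cfg = immutable_dict({"dim": 1})
try:
    cfg["dim"] = 2                    # __setitem__ also raises
except NotImplementedError as err:
    print(err)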
evalkit_tf437/lib/python3.10/site-packages/torch/fx/operator_schemas.py ADDED
@@ -0,0 +1,440 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import inspect
3
+ import numbers
4
+ import types
5
+ import typing
6
+ import enum
7
+ import warnings
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
9
+ from torch._jit_internal import boolean_dispatched
10
+ from ._compatibility import compatibility
11
+ from torch._ops import OpOverloadPacket, OpOverload
12
+
13
+ if TYPE_CHECKING:
14
+ from .node import Argument
15
+
16
+ __all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
17
+ "type_matches", "normalize_function", "normalize_module"]
18
+
19
+ @compatibility(is_backward_compatible=False)
20
+ class ArgsKwargsPair(NamedTuple):
21
+ """
22
+ Simple named tuple for wrapping args/kwargs pairs.
23
+ """
24
+ args: Tuple[Any, ...]
25
+ kwargs: Dict[str, Any]
26
+
27
+ _manual_overrides : Dict[Callable, List[inspect.Signature]] = {}
28
+
29
+ def _nonzero_schemas():
30
+ signatures = []
31
+
32
+ def nonzero(self):
33
+ pass
34
+ signatures.append(inspect.signature(nonzero))
35
+
36
+ def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef]
37
+ pass
38
+ signatures.append(inspect.signature(nonzero))
39
+
40
+ return signatures
41
+
42
+ _manual_overrides[torch.nonzero] = _nonzero_schemas()
43
+
44
+ class _FakeGlobalNamespace:
45
+ def __getattr__(self, name):
46
+ if name == 'torch':
47
+ return torch
48
+ raise RuntimeError('Expected a torch namespace lookup')
49
+
50
+ _type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
51
+ 'number' : numbers.Number, 'Future' : torch.jit.Future,
52
+ 'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
53
+ '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
54
+ 't': typing.TypeVar('t')}
55
+ for k in dir(typing):
56
+ _type_eval_globals[k] = getattr(typing, k)
57
+
58
+ def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
59
+ """
60
+ Convert a TorchScript type to a Python type (including subtypes) via
61
+ eval'ing the annotation_str. _type_eval_globals sets up expressions
62
+ like "List" and "Future" to map to actual types (typing.List and jit.Future)
63
+ """
64
+ return eval(ts_type.annotation_str, _type_eval_globals)
65
+
66
+ def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
67
+ from inspect import Parameter
68
+ parameters : List[Parameter] = []
69
+ for arg in ts_schema.arguments:
70
+ arg_type = _torchscript_type_to_python_type(arg.type)
71
+ default = arg.default_value if arg.has_default_value() else Parameter.empty
72
+ # TODO: Figure out if this is safe. It seems like when generating the type signatures for
73
+ # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
74
+ # argument name. Downstream, if someone converts that positional argument to a keyword
75
+ # argument, the name mismatch will break things, so here we're going to normalize the
76
+ # name to "input"
77
+ name = arg.name if arg.name != 'self' else 'input'
78
+ kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD
79
+ # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument
80
+ if name == "from":
81
+ assert kind == Parameter.POSITIONAL_OR_KEYWORD
82
+ # ParameterKind type is internal implementation detail to inspec package
83
+ # which makes it hard to do type annotation
84
+ kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment]
85
+ # This renders all previous arguments to positional only
86
+ for idx, p in enumerate(parameters):
87
+ assert p.kind == Parameter.POSITIONAL_OR_KEYWORD
88
+ parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation)
89
+ parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type))
90
+ return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
91
+ if len(return_types) == 0:
92
+ return_type = None
93
+ elif len(return_types) == 1:
94
+ return_type = return_types[0]
95
+ else:
96
+ return_type = tuple(return_types)
97
+
98
+ return inspect.Signature(parameters, return_annotation=return_type)
99
+
100
+ _SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {}
101
+
102
+ def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
103
+ # Cached as it's called in the hot path of FakeTensor dispatch
104
+ cache_key = ts_schema.name, ts_schema.overload_name
105
+ cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key)
106
+ if cache_val is not None:
107
+ return cache_val
108
+
109
+ res = _torchscript_schema_to_signature_impl(ts_schema)
110
+ _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res
111
+ return res
112
+
113
+ @compatibility(is_backward_compatible=False)
114
+ def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
115
+ signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)
116
+
117
+ if signatures and schemas:
118
+ matched_schemas = []
119
+
120
+ # Iterate through all of the schema until we find one that matches
121
+ # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
122
+ # values. If none matches, `new_args_and_kwargs` will be None
123
+ for candidate_signature, schema in zip(signatures, schemas):
124
+ try:
125
+ candidate_signature.bind(*args, **kwargs)
126
+ matched_schemas.append((candidate_signature, schema))
127
+ except TypeError as e:
128
+ continue
129
+
130
+ def throw_if_mutable(schema):
131
+ if schema.is_mutable:
132
+ raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional '
133
+ f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
134
+ f'are not supported')
135
+
136
+ if len(matched_schemas) == 0:
137
+ # Did not match any schema. Cannot check for mutation
138
+ pass
139
+ elif len(matched_schemas) == 1:
140
+ # Matched exactly one schema, unambiguous
141
+ _, schema_to_check = matched_schemas[0]
142
+ throw_if_mutable(schema_to_check)
143
+ pass
144
+ else:
145
+ # Ambiguous schema match. Since mutability checking is best effort,
146
+ # do nothing.
147
+ pass
148
+
149
+ @compatibility(is_backward_compatible=False)
150
+ def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
151
+ """
152
+ Given an operator on the `torch` namespace, return a list of `inspect.Signature`
153
+ objects corresponding to the overloads of that op.. May return `None` if a signature
154
+ could not be retrieved.
155
+
156
+ Args:
157
+ op (Callable): An operator on the `torch` namespace to look up a signature for
158
+
159
+ Returns:
160
+ Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
161
+ operator, or None if the operator signatures could not be retrieved. If
162
+ return_schemas=True, returns a tuple containing the optional Python signatures
163
+ and the optional TorchScript Function signature
164
+ """
165
+ if isinstance(op, OpOverload):
166
+ schemas = [op._schema]
167
+ elif isinstance(op, OpOverloadPacket):
168
+ schemas = [getattr(op, overload)._schema for overload in op.overloads()]
169
+ else:
170
+ override = _manual_overrides.get(op)
171
+ if override:
172
+ return (override, None) if return_schemas else None
173
+
174
+ aten_fn = torch.jit._builtins._find_builtin(op)
175
+
176
+ if aten_fn is None:
177
+ return (None, None) if return_schemas else None
178
+ schemas = torch._C._jit_get_schemas_for_operator(aten_fn)
179
+
180
+ signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]
181
+ return (signatures, schemas) if return_schemas else signatures
182
+
+ @compatibility(is_backward_compatible=False)
+ def create_type_hint(x):
+     try:
+         if isinstance(x, (list, tuple)):
+             # todo(chilli): Figure out the right way for mypy to handle this
+             if isinstance(x, list):
+                 def ret_type(x):
+                     return List[x]  # type: ignore[valid-type]
+             else:
+                 def ret_type(x):
+                     return Tuple[x, ...]
+             if len(x) == 0:
+                 return ret_type(Any)
+             base_type = x[0]
+             for t in x:
+                 if issubclass(t, base_type):
+                     continue
+                 elif issubclass(base_type, t):
+                     base_type = t
+                 else:
+                     return ret_type(Any)
+             return ret_type(base_type)
+     except Exception:
+         # We tried to create a type hint for the list/tuple but failed.
+         warnings.warn(f"We were not able to successfully create type hint from the type {x}")
+     return x
+
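A few illustrative evaluations (assuming the containing module is importable); note the input is a list/tuple of element *types*, which are widened to their common base:

    from torch.fx.operator_schemas import create_type_hint

    create_type_hint([int, int])    # List[int]
    create_type_hint((int, bool))   # Tuple[int, ...]: bool narrows to the common base int
    create_type_hint([int, str])    # List[Any]: no common base, falls back to Any
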
+ @compatibility(is_backward_compatible=False)
+ def type_matches(signature_type : Any, argument_type : Any):
+     sig_origin_type = getattr(signature_type, '__origin__', signature_type)
+
+     if signature_type is argument_type:
+         return True
+
+     # Union types in signature. Given type needs to match one of the
+     # contained types in the Union
+     if sig_origin_type is typing.Union and signature_type != argument_type:
+         sig_contained = signature_type.__args__
+         return any(type_matches(c, argument_type) for c in sig_contained)
+
+     if signature_type is List[int] and argument_type is int:
+         # int can be promoted to List[int]
+         return True
+
+     if getattr(signature_type, '__origin__', None) in {list, List}:
+         sig_el_type = signature_type.__args__[0]
+         if not inspect.isclass(sig_el_type):
+             warnings.warn(
+                 f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
+             return False
+         if getattr(argument_type, '__origin__', None) in {list, List}:
+             return issubclass(argument_type.__args__[0], sig_el_type)
+
+         def is_homogeneous_tuple(t):
+             if getattr(t, "__origin__", None) not in {tuple, Tuple}:
+                 return False
+             contained = t.__args__
+             if t.__args__ == ((),):  # Tuple[()].__args__ == ((),) for some reason
+                 return True
+             return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)
+
+         # Tuple[T] is accepted for List[T] parameters
+         return is_homogeneous_tuple(argument_type)
+
+     # Dtype is an int in schemas
+     if signature_type is int and argument_type is torch.dtype:
+         return True
+
+     if signature_type is numbers.Number and argument_type in {int, float}:
+         return True
+     if inspect.isclass(argument_type) and inspect.isclass(signature_type):
+         return issubclass(argument_type, signature_type)
+
+     return False
+
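An illustrative truth table for the promotions above (hypothetical REPL session):

    import numbers, typing, torch
    from torch.fx.operator_schemas import type_matches

    type_matches(typing.List[int], int)                      # True: int promotes to List[int]
    type_matches(int, torch.dtype)                           # True: dtype is carried as int in schemas
    type_matches(numbers.Number, float)                      # True
    type_matches(typing.List[int], typing.Tuple[int, ...])   # True: homogeneous tuple accepted for List
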
+ @compatibility(is_backward_compatible=False)
+ def normalize_function(
+         target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
+         kwarg_types : Optional[Dict[str, Any]] = None,
+         normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
+     """
+     Returns normalized arguments to PyTorch functions. This means that
+     `args/kwargs` will be matched up to the function's
+     signature and return exclusively kwargs in positional order if
+     `normalize_to_only_use_kwargs` is True.
+     Also populates default values. Does not support positional-only
+     parameters or varargs parameters (*args, **kwargs). Does not support modules.
+
+     May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
+
+     Args:
+         target (Callable): Function that we are normalizing
+         args (Tuple[Any]): Tuple of args to the function
+         kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
+         arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
+         kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
+         normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
+
+     Returns:
+         Returns normalized_args_and_kwargs, or `None` if not successful.
+     """
+     if kwargs is None:
+         kwargs = {}
+     new_args_and_kwargs = None
+     if not isinstance(target, types.BuiltinFunctionType) and not (
+         isinstance(target, (OpOverloadPacket, OpOverload))
+     ):
+         target_for_analysis = target
+         if target in boolean_dispatched:
+             # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
+             # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
+             # branches of the dispatch have exactly the same signature. If they do, use the `true`
+             # branch signature for analysis. Otherwise, leave this un-normalized
+             assert not isinstance(target, str)
+             dispatched = boolean_dispatched[target]
+             if_true, if_false = dispatched['if_true'], dispatched['if_false']
+             if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
+                 return None
+             target_for_analysis = if_true
+
+         assert callable(target_for_analysis)
+         sig = inspect.signature(inspect.unwrap(target_for_analysis))
+         new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
+     else:
+         assert callable(target)
+         torch_op_schemas = get_signature_for_torch_op(target)
+         matched_schemas = []
+         if torch_op_schemas:
+             # Iterate through all of the schemas until we find one that matches
+             # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
+             # values. If none matches, `new_args_and_kwargs` will be None
+             for candidate_signature in torch_op_schemas:
+                 try:
+                     candidate_signature.bind(*args, **kwargs)
+                     matched_schemas.append(candidate_signature)
+                 except TypeError:
+                     continue
+
+             if len(matched_schemas) == 0:
+                 # Did not match any schema. Cannot normalize
+                 pass
+             elif len(matched_schemas) == 1:
+                 # Matched exactly one schema, unambiguous
+                 new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs,
+                                                                              normalize_to_only_use_kwargs)
+             else:
+                 if arg_types is not None or kwarg_types is not None:
+                     arg_types = arg_types if arg_types else cast(Tuple[Any], ())
+                     kwarg_types = kwarg_types if kwarg_types else {}
+                     for candidate_signature in torch_op_schemas:
+                         sig_matches = True
+                         try:
+                             bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
+                             for arg_name, arg_type in bound_types.arguments.items():
+                                 param = candidate_signature.parameters[arg_name]
+                                 sig_matches = sig_matches and type_matches(param.annotation, arg_type)
+                         except TypeError:
+                             sig_matches = False
+                         if sig_matches:
+                             new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs,
+                                                                                          normalize_to_only_use_kwargs)
+                             break
+                 else:
+                     # Matched more than one schema. In this situation, the caller must provide the types of
+                     # the arguments of the overload they expect.
+                     schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
+                     raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
+                                        f'the schema match was ambiguous! Please provide argument types to '
+                                        f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')
+
+     return new_args_and_kwargs
+
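A usage sketch (values illustrative): torch.add binds to several overlapping schemas, so argument types are needed to disambiguate; the call is then rewritten into keyword form.

    import torch
    from torch.fx.operator_schemas import normalize_function

    t1, t2 = torch.randn(3), torch.randn(3)
    pair = normalize_function(torch.add, (t1, t2),
                              arg_types=(torch.Tensor, torch.Tensor),
                              normalize_to_only_use_kwargs=True)
    if pair is not None:
        print(list(pair.kwargs))  # e.g. ['input', 'other', 'alpha']
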
+ @compatibility(is_backward_compatible=False)
+ def normalize_module(
+         root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
+         normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
+     """
+     Returns normalized arguments to PyTorch modules. This means that
+     `args/kwargs` will be matched up to the module's forward
+     signature and return exclusively kwargs in positional order if
+     `normalize_to_only_use_kwargs` is True.
+     Also populates default values. Does not support positional-only
+     parameters or varargs parameters (*args, **kwargs).
+
+     Args:
+         root (nn.Module): root module upon which we query modules
+         target (str): qualified name of the submodule whose call we are normalizing
+         args (Tuple[Any]): Tuple of args to the module
+         kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the module
+         normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
+
+     Returns:
+         Returns normalized_args_and_kwargs, or `None` if not successful.
+     """
+     try:
+         submod = root.get_submodule(target)
+     except AttributeError as e:
+         raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
+                            f"have that target!") from e
+     if hasattr(submod.__class__, '__name__'):
+         classname = submod.__class__.__name__
+         if getattr(torch.nn, classname, None) == submod.__class__:
+             sig = inspect.signature(inspect.unwrap(submod.forward))
+             if kwargs is None:
+                 kwargs = {}
+             new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs,
+                                                                          normalize_to_only_use_kwargs)
+             return new_args_and_kwargs
+     return None
+
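Sketch (module layout hypothetical): only stock torch.nn classes are normalized, with the submodule's forward signature supplying the kwarg names.

    import torch
    from torch.fx.operator_schemas import normalize_module

    root = torch.nn.Sequential(torch.nn.ReLU())
    pair = normalize_module(root, '0', (torch.randn(2),),
                            normalize_to_only_use_kwargs=True)
    print(pair.kwargs.keys() if pair else None)  # e.g. dict_keys(['input'])
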
+ def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
+                                            kwargs : Dict[str, Any],
+                                            normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
+     """
+     Given a call target, args, and kwargs, return the arguments normalized into
+     an ArgsKwargsPair, or None if the type signature is not supported by
+     this normalization.
+
+     Args:
+         sig (inspect.Signature): Signature object for the target
+         args (Tuple): Arguments that appear at the callsite for `target`
+         kwargs (Dict): Keyword arguments that appear at the callsite for `target`
+         normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
+
+     Returns:
+         Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
+             this target is not supported.
+     """
+
+     # Don't currently support positional-only
+     # or varargs (*args, **kwargs) signatures
+     supported_parameter_types = {
+         inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
+     if any(p.kind not in supported_parameter_types for p in sig.parameters.values()):
+         # Add an exception for one signature, which is common for random/uniform, i.e.:
+         # Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
+         # `from` is a Python keyword, so functions with that signature end up with
+         # positional-only args, but at the same time they could be dispatched as kwargs
+         if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:
+             return None
+
+     bound_args = sig.bind(*args, **kwargs)
+     bound_args.apply_defaults()
+
+     new_kwargs : Dict[str, Any] = {}
+     new_args : List[Any] = []
+     for i, param in enumerate(sig.parameters):
+         if not normalize_to_only_use_kwargs and i < len(args):
+             new_args.append(bound_args.arguments[param])
+         else:
+             new_kwargs[param] = bound_args.arguments[param]
+
+     return ArgsKwargsPair(tuple(new_args), new_kwargs)
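For completeness, a direct sketch of this private helper on a plain Python signature (function name hypothetical):

    import inspect
    from torch.fx.operator_schemas import _args_kwargs_to_normalized_args_kwargs

    def f(a, b=2, *, c=3):
        return a + b + c

    pair = _args_kwargs_to_normalized_args_kwargs(inspect.signature(f), (1,), {'c': 5},
                                                  normalize_to_only_use_kwargs=False)
    print(pair)  # ArgsKwargsPair(args=(1,), kwargs={'b': 2, 'c': 5})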
evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b34ef3bac735c84b15d0cb3df24157a758f9caa3d50e29490e1cbe6d2a9bc6a7
+ size 435665
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c ADDED
@@ -0,0 +1,27 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     float *src = (float*)argv[argc-1];
+     float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+     /* MAXMIN */
+     int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+     ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+     /* ROUNDING */
+     ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+ #ifdef __aarch64__
+     {
+         double *src2 = (double*)argv[argc-1];
+         float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+         /* MAXMIN */
+         ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+         ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+         /* ROUNDING */
+         ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+     }
+ #endif
+     return ret;
+ }
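The cpu_*.c files that follow are numpy.distutils feature probes: numpy.distutils.ccompiler_opt builds each one with candidate flags and enables a feature only when the probe compiles. A hypothetical, minimal driver illustrating the idea (not numpy's actual implementation):

    import os, subprocess, tempfile

    def probe_compiles(source: str, flags=()) -> bool:
        # Return True if `source` builds with `flags`; assumes a POSIX `cc` on PATH.
        obj = os.path.join(tempfile.mkdtemp(), "probe.o")
        cmd = ["cc", "-c", source, "-o", obj, *flags]
        return subprocess.run(cmd, capture_output=True).returncode == 0

    # e.g. probe_compiles("cpu_avx2.c", ["-mavx2"])
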
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c ADDED
@@ -0,0 +1,16 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     unsigned char *src = (unsigned char*)argv[argc-1];
+     uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
+     uint32x4_t va = vdupq_n_u32(3);
+     int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+ #ifdef __aarch64__
+     ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+ #endif
+     return ret;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c ADDED
@@ -0,0 +1,19 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     float16_t *src = (float16_t*)argv[argc-1];
+     float *src2 = (float*)argv[argc-2];
+     float16x8_t vhp = vdupq_n_f16(src[0]);
+     float16x4_t vlhp = vdup_n_f16(src[1]);
+     float32x4_t vf = vdupq_n_f32(src2[0]);
+     float32x2_t vlf = vdup_n_f32(src2[1]);
+
+     int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+     ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
+
+     return ret;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c ADDED
@@ -0,0 +1,15 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     float16_t *src = (float16_t*)argv[argc-1];
+     float16x8_t vhp = vdupq_n_f16(src[0]);
+     float16x4_t vlhp = vdup_n_f16(src[1]);
+
+     int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+     ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+     return ret;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __AVX__
+         #error "HOST/ARCH doesn't support AVX"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+     return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __AVX2__
+         #error "HOST/ARCH doesn't support AVX2"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+     return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c ADDED
@@ -0,0 +1,22 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __AVX512VNNI__
+         #error "HOST/ARCH doesn't support CascadeLake AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     /* VNNI */
+     __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+     a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c ADDED
@@ -0,0 +1,24 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+         #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+     /* IFMA */
+     a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+     /* VBMI */
+     a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c ADDED
@@ -0,0 +1,26 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
+         #error "HOST/ARCH doesn't support IceLake AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+     /* VBMI2 */
+     a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
+     /* BITALG */
+     a = _mm512_popcnt_epi8(a);
+     /* VPOPCNTDQ */
+     a = _mm512_popcnt_epi64(a);
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c ADDED
@@ -0,0 +1,25 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+         #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     int base[128]={};
+     __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+     /* ER */
+     __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+     /* PF */
+     _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+     return base[0];
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c ADDED
@@ -0,0 +1,30 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+         #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+     __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+     /* 4FMAPS */
+     b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+     /* 4VNNIW */
+     a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+     /* VPOPCNTDQ */
+     a = _mm512_popcnt_epi64(a);
+
+     a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c ADDED
@@ -0,0 +1,26 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+         #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+     /* VL */
+     __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+     /* DQ */
+     __m512i b = _mm512_broadcast_i32x8(a);
+     /* BW */
+     b = _mm512_abs_epi16(b);
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_spr.c ADDED
@@ -0,0 +1,26 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__AVX512FP16__)
+         #error "HOST/ARCH doesn't support Sapphire Rapids AVX512FP16 features"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     /* clang has a bug regarding our spr code, see gh-23730. */
+ #if __clang__
+     #error
+ #endif
+     __m512h a = _mm512_loadu_ph((void*)argv[argc-1]);
+     __m512h temp = _mm512_fmadd_ph(a, a, a);
+     _mm512_storeu_ph((void*)(argv[argc-1]), temp);
+     return 0;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __AVX512CD__
+         #error "HOST/ARCH doesn't support AVX512CD"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __AVX512F__
+         #error "HOST/ARCH doesn't support AVX512F"
+     #endif
+ #endif
+
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+     return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c ADDED
@@ -0,0 +1,22 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __F16C__
+         #error "HOST/ARCH doesn't support F16C"
+     #endif
+ #endif
+
+ #include <emmintrin.h>
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+     __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+     return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c ADDED
@@ -0,0 +1,22 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__FMA__) && !defined(__AVX2__)
+         #error "HOST/ARCH doesn't support FMA3"
+     #endif
+ #endif
+
+ #include <xmmintrin.h>
+ #include <immintrin.h>
+
+ int main(int argc, char **argv)
+ {
+     __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+     a = _mm256_fmadd_ps(a, a, a);
+     return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c ADDED
@@ -0,0 +1,13 @@
+ #include <immintrin.h>
+ #ifdef _MSC_VER
+     #include <ammintrin.h>
+ #else
+     #include <x86intrin.h>
+ #endif
+
+ int main(int argc, char **argv)
+ {
+     __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+     a = _mm256_macc_ps(a, a, a);
+     return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c ADDED
@@ -0,0 +1,19 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     // passing from untraced pointers to avoid optimizing out any constants
+     // so we can test against the linker.
+     float *src = (float*)argv[argc-1];
+     float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+     int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+ #ifdef __aarch64__
+     double *src2 = (double*)argv[argc-2];
+     float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+     ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+ #endif
+     return ret;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c ADDED
@@ -0,0 +1,11 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     short *src = (short*)argv[argc-1];
+     float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+     return (int)vgetq_lane_f32(v_z4, 0);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c ADDED
@@ -0,0 +1,21 @@
+ #ifdef _MSC_VER
+     #include <Intrin.h>
+ #endif
+ #include <arm_neon.h>
+
+ int main(int argc, char **argv)
+ {
+     float *src = (float*)argv[argc-1];
+     float32x4_t v1 = vdupq_n_f32(src[0]);
+     float32x4_t v2 = vdupq_n_f32(src[1]);
+     float32x4_t v3 = vdupq_n_f32(src[2]);
+     int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+ #ifdef __aarch64__
+     double *src2 = (double*)argv[argc-2];
+     float64x2_t vd1 = vdupq_n_f64(src2[0]);
+     float64x2_t vd2 = vdupq_n_f64(src2[1]);
+     float64x2_t vd3 = vdupq_n_f64(src2[2]);
+     ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+ #endif
+     return ret;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c ADDED
@@ -0,0 +1,32 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+         #error "HOST/ARCH doesn't support POPCNT"
+     #endif
+ #endif
+
+ #ifdef _MSC_VER
+     #include <nmmintrin.h>
+ #else
+     #include <popcntintrin.h>
+ #endif
+
+ int main(int argc, char **argv)
+ {
+     // To make sure popcnt instructions are generated
+     // and tested against the assembler
+     unsigned long long a = *((unsigned long long*)argv[argc-1]);
+     unsigned int b = *((unsigned int*)argv[argc-2]);
+
+ #if defined(_M_X64) || defined(__x86_64__)
+     a = _mm_popcnt_u64(a);
+ #endif
+     b = _mm_popcnt_u32(b);
+     return (int)a + b;
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSE__
+         #error "HOST/ARCH doesn't support SSE"
+     #endif
+ #endif
+
+ #include <xmmintrin.h>
+
+ int main(void)
+ {
+     __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+     return (int)_mm_cvtss_f32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSE2__
+         #error "HOST/ARCH doesn't support SSE2"
+     #endif
+ #endif
+
+ #include <emmintrin.h>
+
+ int main(void)
+ {
+     __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+     return _mm_cvtsi128_si32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSE3__
+         #error "HOST/ARCH doesn't support SSE3"
+     #endif
+ #endif
+
+ #include <pmmintrin.h>
+
+ int main(void)
+ {
+     __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+     return (int)_mm_cvtss_f32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSE4_1__
+         #error "HOST/ARCH doesn't support SSE41"
+     #endif
+ #endif
+
+ #include <smmintrin.h>
+
+ int main(void)
+ {
+     __m128 a = _mm_floor_ps(_mm_setzero_ps());
+     return (int)_mm_cvtss_f32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSE4_2__
+         #error "HOST/ARCH doesn't support SSE42"
+     #endif
+ #endif
+
+ #include <smmintrin.h>
+
+ int main(void)
+ {
+     __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+     return (int)_mm_cvtss_f32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c ADDED
@@ -0,0 +1,20 @@
+ #if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+     /*
+      * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+      * whether or not the build options for those features are specified.
+      * Therefore, we must test #definitions of CPU features when option native/host
+      * is enabled via `--cpu-baseline` or through env var `CFLAGS`, otherwise
+      * the test will be broken and lead to enabling all possible features.
+      */
+     #ifndef __SSSE3__
+         #error "HOST/ARCH doesn't support SSSE3"
+     #endif
+ #endif
+
+ #include <tmmintrin.h>
+
+ int main(void)
+ {
+     __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+     return (int)_mm_cvtsi128_si32(a);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c ADDED
@@ -0,0 +1,13 @@
+ #ifndef __VSX__
+     #error "VSX is not supported"
+ #endif
+ #include <altivec.h>
+
+ typedef __vector unsigned long long v_uint64x2;
+
+ int main(void)
+ {
+     v_uint64x2 z2 = (v_uint64x2){0, 0};
+     z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+     return (int)vec_extract(z2, 0);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c ADDED
@@ -0,0 +1,13 @@
+ #ifndef __VSX__
+     #error "VSX is not supported"
+ #endif
+ #include <altivec.h>
+
+ typedef __vector unsigned int v_uint32x4;
+
+ int main(void)
+ {
+     v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+     z4 = vec_absd(z4, z4);
+     return (int)vec_extract(z4, 0);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c ADDED
@@ -0,0 +1,14 @@
+ #ifndef __VSX__
+     #error "VSX is not supported"
+ #endif
+ #include <altivec.h>
+
+ typedef __vector unsigned int v_uint32x4;
+
+ int main(void)
+ {
+     v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
+     v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
+     v_uint32x4 v3 = vec_mod(v1, v2);
+     return (int)vec_extractm(v3);
+ }
falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c ADDED
@@ -0,0 +1,16 @@
+ #if (__VEC__ < 10301) || (__ARCH__ < 11)
+     #error VX not supported
+ #endif
+
+ #include <vecintrin.h>
+ int main(int argc, char **argv)
+ {
+     __vector double x = vec_abs(vec_xl(argc, (double*)argv));
+     __vector double y = vec_load_len((double*)argv, (unsigned int)argc);
+
+     x = vec_round(vec_ceil(x) + vec_floor(y));
+     __vector bool long long m = vec_cmpge(x, y);
+     __vector long long i = vec_signed(vec_sel(x, y, m));
+
+     return (int)vec_extract(i, 0);
+ }