diff --git a/.gitattributes b/.gitattributes index bba595e7a701124c6dd43c76f7eed9a705e3aa68..ad96f3976d8527dea8b76328dcc150b792b3f438 100644 --- a/.gitattributes +++ b/.gitattributes @@ -568,3 +568,6 @@ falcon/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expans falcon/lib/python3.10/site-packages/PIL/_imagingmath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text falcon/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text falcon/lib/python3.10/site-packages/psutil/_psutil_linux.abi3.so filter=lfs diff=lfs merge=lfs -text +evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +deepseek/lib/python3.10/site-packages/xformers/_C.so filter=lfs diff=lfs merge=lfs -text +falcon/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/deepseek/lib/python3.10/site-packages/xformers/_C.so b/deepseek/lib/python3.10/site-packages/xformers/_C.so new file mode 100644 index 0000000000000000000000000000000000000000..a3ff7c6fcb76ee76214a4d69a8c6366ad7309ae4 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/xformers/_C.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f09f83477c28599853978c2731663e517f27fba277141ff74d6acc1ea5c60cb +size 50942528 diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21acd1af8f4da8cb7e65090f76a9b9d841308e0e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b3b21b8663d667f8b0c6ccd866e70b1e9c89ae Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02d2394b99111d47bd4324f3c5811b147f527783 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03eff59f513307ca32a551fd6be90f12c613f512 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7b1ea76ab194354e16c72d1b02514cec7ca70339 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f00e5186fd875e7eaf2ee866b52d9163a0bb10a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec9b39af0aaa3dc27cc50329a152742531d89f0f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd0ef36739d2200e7461ae282ed7e176f5709008 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f95cc2e259207d579b2e3bd907d7c48d3e9cbec5 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..799bd773ef0c9b7fdc17d40dcacf4a6ca23a2149 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..249ce9b1157829d47d8fd833068de7e4da54cf1c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__init__.py @@ -0,0 +1,55 @@ +import threading + +import torch._C._lazy +from torch.utils._pytree import tree_flatten, tree_unflatten + +from .closure import add_step_closure, run_step_closures + + +def mark_step(device: str = "", wait=False): + """Triggers a mark step, which amounts to + - collecting a group of 'live' lazy tensors to index into the compilation cache + (lowering/compiling their IR graphs if not cached) + - kicking off execution of the compiled function + - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator) + """ + # TODO(whc) expand this to include backend hooks and align with XLA backend needs + 
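+    # Illustrative usage (a sketch, not part of this module; assumes a lazy backend + # has been initialized, e.g. via torch._lazy.ts_backend.init() where available): + # + #     x = torch.randn(2, 2, device="lazy") + #     y = x.mm(x) + x          # recorded into the lazy IR graph, not executed yet + #     torch._lazy.mark_step()  # cut, compile, and launch the pending graph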
torch._C._lazy._mark_step(device, [], wait=wait) + + run_step_closures() + + +def wait_device_ops(devices=None): + """Waits for all the async operations on the given devices to complete. + Args: + devices (string..., optional): The devices whose async ops need to be waited + for. If empty, all the local devices will be waited for. + """ + if devices is None: + devices = [] + torch._C._lazy._wait_device_ops(devices=devices) + + +def sync_multi(tensors, devices): + """ + Sync the list of lazy tensors so their IR gets lowered for the active backend + and the compiled computation graph gets cached. + """ + torch._C._lazy._sync_multi(tensors, devices) + + +def get_tensor_id(tensor): + """Return a unique id of the lazy tensor maintained by LTC""" + return torch._C._lazy._get_tensor_id(tensor) + + +def to_cpu(tensors, devices=None): + devices = devices or ["lazy"] + + flattened, spec = tree_flatten(tensors) + sync_multi(flattened, devices) + return tree_unflatten([t.to("cpu") for t in flattened], spec) + + +def save(tensors, *args, **kwargs): + torch.save(to_cpu(tensors), *args, **kwargs) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/fx/config.py b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/config.py new file mode 100644 index 0000000000000000000000000000000000000000..da5120d6edf180f7fbbe88ac342b4d0e4b383e50 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/config.py @@ -0,0 +1,6 @@ +# Whether to disable showing progress on compilation passes +# Need to add a new config, otherwise we will get a circular import if the dynamo config is imported here +disable_progress = True + +# If True this also shows the node names in each pass; for small models this is great, but for larger models it's quite noisy +verbose_progress = False diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/fx/graph_module.py b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..7951faebdbd7bb9a7334d5f9c383bc1edfe5182f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/graph_module.py @@ -0,0 +1,867 @@ +import copy +import itertools +import linecache +import os +import sys +import traceback +import warnings +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Set, Type, Union + +import torch +import torch.nn as nn +import torch.overrides +from torch.nn.modules.module import _addindent +from torch.package import Importer, PackageExporter, PackageImporter, sys_importer + +from ._compatibility import compatibility +from .graph import _custom_builtins, _is_from_torch, _PyTreeCodeGen, Graph, PythonCode + +__all__ = [ + "reduce_graph_module", + "reduce_package_graph_module", + "reduce_deploy_graph_module", + "GraphModule", +] + +_USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes" + +# Normal exec loses the source code, however we can work with +# the linecache module to recover it. +# Using _exec_with_source will add it to our local cache +# and then tools like TorchScript will be able to get source info. +class _EvalCacheLoader: + def __init__(self): + self.eval_cache = {} + self.next_id = 0 + + def cache(self, src: str, globals: Dict[str, Any], co_fields=None): + """Store the source in a private cache, and add a lazy entry in linecache + that allows the source to be retrieved by 'filename'.
+ + Args: + src (str): The module source to cache + globals (dict): The module globals + + Returns: + str: The cache key (and dummy filename) generated for src. + """ + + key = self._get_key() + if co_fields: + key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}" + self.eval_cache[key] = src + + # Don't mutate globals so that this loader is only used + # to populate linecache, and doesn't interact with other modules + # that might check `__loader__` + globals_copy = globals.copy() + globals_copy["__file__"] = key + globals_copy["__name__"] = key + globals_copy["__loader__"] = self + linecache.lazycache(key, globals_copy) + + return key + + # Part of the loader protocol (PEP 302) + # linecache will use this method when trying to find source code + def get_source(self, module_name) -> Optional[str]: + if module_name in self.eval_cache: + return self.eval_cache[module_name] + return None + + def _get_key(self): + key = f"<eval_with_key>.{self.next_id}" + self.next_id += 1 + return key + + +_loader = _EvalCacheLoader() + + +def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None): + key = _loader.cache(src, globals, co_fields) + exec(compile(src, key, "exec"), globals) + + +def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None): + return _method_from_src( + method_name="forward", src=src, globals=globals, co_fields=co_fields + ) + + +def _method_from_src( + method_name: str, src: str, globals: Dict[str, Any], co_fields=None +) -> Callable: + # avoid mutating the passed in dict + globals_copy = globals.copy() + _exec_with_source(src, globals_copy, co_fields) + fn = globals_copy[method_name] + del globals_copy[method_name] + return fn + + +def _format_import_statement(name: str, obj: Any, importer: Importer) -> str: + if name in _custom_builtins: + return _custom_builtins[name].import_str + if _is_from_torch(name): + return "import torch" + module_name, attr_name = importer.get_name(obj) + return f"from {module_name} import {attr_name} as {name}" + + +def _format_import_block(globals: Dict[str, Any], importer: Importer): + import_strs: Set[str] = set() + for name, obj in globals.items(): + import_strs.add(_format_import_statement(name, obj, importer)) + # Sort the imports so we have a stable import block that allows us to + # hash the graph module and get a consistent key for use in a cache.
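+    # For example (illustrative names only), globals of {"torch": torch, "helper": my_fn} + # might produce the sorted block: + #     from my_pkg.utils import my_fn as helper + #     import torch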
+ return "\n".join(sorted(import_strs)) + + +@compatibility(is_backward_compatible=True) +def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module: + # BC: attribute name was changed from `code` to `_code` to facilitate + # making `code` into a property and adding a docstring to it + fn_src = body.get("_code") or body["code"] + forward = _forward_from_src(import_block + fn_src, {}) + return _deserialize_graph_module(forward, body) + + +@compatibility(is_backward_compatible=True) +def reduce_package_graph_module( + importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str +) -> torch.nn.Module: + forward = importer.import_module(generated_module_name).forward + return _deserialize_graph_module(forward, body) + + +@compatibility(is_backward_compatible=True) +def reduce_deploy_graph_module( + importer: PackageImporter, body: Dict[Any, Any], import_block: str +) -> torch.nn.Module: + ns = {} + ns["__builtins__"] = importer.patched_builtins + fn_src = body.get("_code") + assert fn_src is not None + forward = _forward_from_src(import_block + fn_src, ns) + return _deserialize_graph_module(forward, body) + + +# We create a dummy class here because symbolic_trace pulls the forward() +# function off of the class, rather than the instance. This class is used +# in _deserialize_graph_module() below. +class _CodeOnlyModule(torch.nn.Module): + def __init__(self, body): + super().__init__() + self.__dict__ = body + + +def _deserialize_graph_module(forward, body: Dict[Any, Any], graph_module_cls=None) -> torch.nn.Module: + """ + Deserialize a GraphModule given the dictionary of the original module, + using the code to reconstruct the graph. We delete the actual graph before + saving the dictionary so that changes to the in-memory graph format do not + get serialized. + """ + + # Try to retrieve the forward source in a backward-compatible way + _CodeOnlyModule.forward = forward + + tracer_cls = body.get("_tracer_cls") + if tracer_cls is None: + from ._symbolic_trace import Tracer + + tracer_cls = Tracer + + graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule") + + # This is a workaround for a mypy linter issue related to + # passing base class as an argument - https://github.com/python/mypy/issues/5865. + cls_tracer: Any = tracer_cls + + class KeepModules(cls_tracer): + # we shouldn't trace into any of the submodules, + # because they were not traced in the original GraphModule + def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool: + return True + + com = _CodeOnlyModule(body) + + tracer_extras = body.get("_tracer_extras", {}) + graph = KeepModules().trace(com, **tracer_extras) + + # Manually set Tracer class on the reconstructed Graph, to avoid + # referencing the private local subclass KeepModules. + graph._tracer_cls = tracer_cls + if graph_module_cls is None: + graph_module_cls = GraphModule + gm = graph_module_cls(com, graph, class_name=graphmodule_cls_name) + + # The GraphModule constructor only retains attributes referenced by the graph. + # In this case, our goal is return a GraphModule as close to identical as the one + # put into the package. If any additional attributes were present in body, + # we should keep them. 
for k, v in body.items(): + if not hasattr(gm, k): + setattr(gm, k, v) + return gm + + +# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module' +# This installs empty Modules where none exist yet if they are subpaths of target +def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str): + *prefix, field = target.split(".") + for item in prefix: + f = getattr(from_module, item) + t = getattr(to_module, item, None) + if f is t: + # we have already installed one of its parents + # (e.g. target = root.linear.weight, but we have already installed root.linear) + # once we install a parent, we no longer need to copy the children + # since all the needed properties will already be present + return + + if t is None: + t = torch.nn.Module() + setattr(to_module, item, t) + from_module, to_module = f, t + + orig = getattr(from_module, field) + # If it is a tensor and not a parameter attribute of a module, it should be a named buffer. + # So, we register it as a named buffer in the target module. + if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter): + to_module.register_buffer(field, orig) + else: + setattr(to_module, field, orig) + + +# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module' +# This installs empty Modules where none exist yet if they are subpaths of target +def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str): + *prefix, field = target.split(".") + for item in prefix: + t = getattr(to_module, item, None) + + if t is None: + t = torch.nn.Module() + setattr(to_module, item, t) + to_module = t + + # If it is a tensor and not a parameter attribute of a module, it should be a named buffer. + # So, we register it as a named buffer in the target module. + if isinstance(from_obj, torch.Tensor) and not isinstance( + from_obj, torch.nn.Parameter + ): + to_module.register_buffer(field, from_obj) + else: + setattr(to_module, field, from_obj) + + +class _WrappedCall: + def __init__(self, cls, cls_call): + self.cls = cls + self.cls_call = cls_call + + # Previously, if an error occurred when valid + # symbolically-traced code was run with an invalid input, the + # user would see the source of the error as coming from + # `File "<eval_with_key_N">`, where N is some number. We use + # this function to generate a more informative error message.
We + # return the traceback itself, a message explaining that the + # error occurred in a traced Module's generated forward + # function, and five lines of context surrounding the faulty + # line + @staticmethod + def _generate_error_message(frame_summary: traceback.FrameSummary) -> str: + # auxiliary variables (for readability) + err_lineno = frame_summary.lineno + assert err_lineno is not None + line = frame_summary.line + assert line is not None + err_line_len = len(line) + all_src_lines = linecache.getlines(frame_summary.filename) + + # constituent substrings of the error message + tb_repr = traceback.format_exc() + custom_msg = ( + "Call using an FX-traced Module, " + f"line {err_lineno} of the traced Module's " + "generated forward function:" + ) + before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno]) + marker = "~" * err_line_len + "~~~ <--- HERE" + err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2]) + + # joined message + return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err]) + + def __call__(self, obj, *args, **kwargs): + try: + if self.cls_call is not None: + return self.cls_call(obj, *args, **kwargs) + else: + return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc] + except Exception as e: + assert e.__traceback__ + topmost_framesummary: traceback.FrameSummary = ( + traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1] + ) # type: ignore[arg-type] + if "eval_with_key" in topmost_framesummary.filename: + print( + _WrappedCall._generate_error_message(topmost_framesummary), + file=sys.stderr, + ) + raise e.with_traceback(None) # noqa: TRY200 + else: + raise e + + +@compatibility(is_backward_compatible=True) +class GraphModule(torch.nn.Module): + """ + GraphModule is an nn.Module generated from an fx.Graph. GraphModule has a + ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated + from that ``graph``. + + .. warning:: + + When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically + regenerated. However, if you edit the contents of the ``graph`` without reassigning + the ``graph`` attribute itself, you must call ``recompile()`` to update the generated + code. + """ + + def __new__(cls: "Type[GraphModule]", *args, **kwargs): + # each instance of a graph module needs its own forward method + # so create a new singleton class for each instance. + # it is a subclass of the user-defined class, the only difference + # is an extra layer to install the forward method + + # address issue described at https://github.com/pytorch/pytorch/issues/63883 + # in other words, traverse class hierarchy to fix the redundant class definition problem + for t in cls.__mro__: + c = t.__qualname__.split(".")[-1] + if c != "GraphModuleImpl": + cls = t + break + + class GraphModuleImpl(cls): # type: ignore[misc, valid-type] + pass + + return super().__new__(GraphModuleImpl) + + @compatibility(is_backward_compatible=True) + def __init__( + self, + root: Union[torch.nn.Module, Dict[str, Any]], + graph: Graph, + class_name: str = "GraphModule", + ): + """ + Construct a GraphModule. + + Args: + + root (Union[torch.nn.Module, Dict[str, Any]]): + ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
+ In the case that ``root`` is a Module, any references to Module-based objects (via qualified + name) in the Graph's Nodes' ``target`` field will be copied over from the respective place + within ``root``'s Module hierarchy into the GraphModule's module hierarchy. + In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be + looked up directly in the dict's keys. The object mapped to by the Dict will be copied + over into the appropriate place within the GraphModule's module hierarchy. + + graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation + + class_name (str): ``class_name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all + error messages will report as originating from ``GraphModule``. It may be helpful to set this + to ``root``'s original name or a name that makes sense within the context of your transform. + """ + super().__init__() + self.__class__.__name__ = class_name + if isinstance(root, torch.nn.Module): + if hasattr(root, "training"): + self.training = root.training + + # When we pickle/unpickle graph module, we don't want to drop any module or attributes. + if isinstance(root, _CodeOnlyModule): + for k, _ in root.named_children(): + _copy_attr(root, self, k) + + for k, _ in root.named_buffers(): + _copy_attr(root, self, k) + + for k, _ in root.named_parameters(): + _copy_attr(root, self, k) + + for node in graph.nodes: + if node.op in ["get_attr", "call_module"]: + assert isinstance(node.target, str) + _copy_attr(root, self, node.target) + elif isinstance(root, dict): + targets_to_copy = [] + for node in graph.nodes: + if node.op in ["get_attr", "call_module"]: + assert isinstance(node.target, str) + if node.target not in root: + raise RuntimeError( + "Node " + + str(node) + + " referenced target " + + node.target + + " but that target was not provided in ``root``!" + ) + targets_to_copy.append(node.target) + # Sort targets in ascending order of the # of atoms. + # This will ensure that less deeply nested attributes are assigned + # before more deeply nested attributes. For example, foo.bar + # will be assigned before foo.bar.baz. Otherwise, we might assign + # the user-provided ``foo.bar`` and wipe out the previously-assigned + # ``foo.bar.baz`` + targets_to_copy.sort(key=lambda t: t.count(".")) + for target_to_copy in targets_to_copy: + _assign_attr(root[target_to_copy], self, target_to_copy) + else: + raise RuntimeError("Unsupported type " + str(root) + " passed for root!") + + self.graph = graph + + # Store the Tracer class responsible for creating a Graph separately as part of the + # GraphModule state, except when the Tracer is defined in a local namespace. + # Locally defined Tracers are not pickleable. This is needed because torch.package will + # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer + # to re-create the Graph during deserialization. + self._tracer_cls = None + if ( + self.graph._tracer_cls + and "<locals>" not in self.graph._tracer_cls.__qualname__ + ): + self._tracer_cls = self.graph._tracer_cls + + self._tracer_extras = {} + if self.graph._tracer_extras: + self._tracer_extras = self.graph._tracer_extras + + # Dictionary to store metadata + self.meta: Dict[str, Any] = {} + + # TorchScript breaks trying to compile the graph setter because of the + # continued string literal.
Issue here: https://github.com/pytorch/pytorch/issues/44842 + # + # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway + __jit_unused_properties__ = ["graph"] + + @property + def graph(self) -> Graph: + """ + Return the ``Graph`` underlying this ``GraphModule`` + """ + return self._graph + + @graph.setter + def graph(self, g: Graph) -> None: + """ + Set the underlying ``Graph`` for this ``GraphModule``. This will internally + recompile the ``GraphModule`` so that the generated ``forward()`` function + corresponds to ``g`` + """ + assert isinstance(g, Graph), f"Expected a Graph instance, but got {type(g)}" + self._graph = g + g.owning_module = self + self.recompile() + + @compatibility(is_backward_compatible=False) + def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"): + """Dumps out module to ``folder`` with ``module_name`` so that it can be + imported with ``from <folder> import <module_name>`` + + Args: + + folder (Union[str, os.PathLike]): The folder to write the code out to + + module_name (str): Top-level name to use for the ``Module`` while + writing out the code + """ + folder = Path(folder) + Path(folder).mkdir(exist_ok=True) + torch.save(self.state_dict(), folder / "state_dict.pt") + tab = " " * 4 + custom_builtins = "\n".join([v.import_str for v in _custom_builtins.values()]) + model_str = f""" +import torch +{custom_builtins} + +from torch.nn import * +class {module_name}(torch.nn.Module): + def __init__(self): + super().__init__() +""" + + def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]: + safe_reprs = [ + nn.Linear, + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nn.BatchNorm1d, + nn.BatchNorm2d, + nn.BatchNorm3d, + ] + if type(module) in safe_reprs: + return f"{module.__repr__()}" + else: + return None + + blobified_modules = [] + for module_name, module in self.named_children(): + module_str = _gen_model_repr(module_name, module) + if module_str is None: + module_file = folder / f"{module_name}.pt" + torch.save(module, module_file) + blobified_modules.append(module_name) + module_repr = module.__repr__().replace("\r", " ").replace("\n", " ") + module_str = f"torch.load(r'{module_file}') # {module_repr}" + model_str += f"{tab*2}self.{module_name} = {module_str}\n" + + for buffer_name, buffer in self._buffers.items(): + if buffer is None: + continue + model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n" + + for param_name, param in self._parameters.items(): + if param is None: + continue + model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n" + + model_str += ( + f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n" + ) + model_str += f"{_addindent(self.code, 4)}\n" + + module_file = folder / "module.py" + module_file.write_text(model_str) + + init_file = folder / "__init__.py" + init_file.write_text("from .module import *") + + if len(blobified_modules) > 0: + warnings.warn( + "Was not able to save the following children modules as reprs -" + f"saved as pickled files instead: {blobified_modules}" + ) + + @compatibility(is_backward_compatible=True) + def add_submodule(self, target: str, m: torch.nn.Module) -> bool: + """ + Adds the given submodule to ``self``. + + This installs empty Modules where none exist yet if they are + subpaths of ``target``.
+ + Args: + target: The fully-qualified string name of the new submodule + (See example in ``nn.Module.get_submodule`` for how to + specify a fully-qualified string.) + m: The submodule itself; the actual object we want to + install in the current Module + + Return: + bool: Whether or not the submodule could be inserted. For + this method to return True, each object in the chain + denoted by ``target`` must either a) not exist yet, + or b) reference an ``nn.Module`` (not a parameter or + other attribute) + """ + *prefix, field = target.split(".") + mod: torch.nn.Module = self + + for item in prefix: + + submod = getattr(mod, item, None) + + if submod is None: + submod = torch.nn.Module() + setattr(mod, item, submod) + + if not isinstance(submod, torch.nn.Module): + return False + + mod = submod + + mod.add_module(field, m) + return True + + @compatibility(is_backward_compatible=True) + def delete_submodule(self, target: str) -> bool: + """ + Deletes the given submodule from ``self``. + + The module will not be deleted if ``target`` is not a valid + target. + + Args: + target: The fully-qualified string name of the submodule + to delete (See example in ``nn.Module.get_submodule`` for how + to specify a fully-qualified string.) + + Returns: + bool: Whether or not the target string referenced a + submodule we want to delete. A return value of ``False`` + means that the ``target`` was not a valid reference to + a submodule. + """ + atoms = target.split(".") + path, target_submod = atoms[:-1], atoms[-1] + mod: torch.nn.Module = self + + # Get the parent module + for item in path: + + if not hasattr(mod, item): + return False + + mod = getattr(mod, item) + + if not isinstance(mod, torch.nn.Module): + return False + + if not hasattr(mod, target_submod): + return False + + if not isinstance(getattr(mod, target_submod), torch.nn.Module): + return False + + delattr(mod, target_submod) + return True + + @compatibility(is_backward_compatible=True) + def delete_all_unused_submodules(self) -> None: + """ + Deletes all unused submodules from ``self``. + + A Module is considered "used" if any one of the following is + true: + 1. It has children that are used + 2. Its forward is called directly via a ``call_module`` node + 3. It has a non-Module attribute that is used from a + ``get_attr`` node + + This method can be called to clean up an ``nn.Module`` without + manually calling ``delete_submodule`` on each unused submodule. + """ + used: List[str] = [] + + for node in self.graph.nodes: + + if node.op == "call_module" or node.op == "get_attr": + + # A list of strings representing the different parts + # of the path. For example, `foo.bar.baz` gives us + # ["foo", "bar", "baz"] + fullpath = node.target.split(".") + + # If we're looking at multiple parts of a path, join + # them with a dot. Otherwise, return that single + # element without doing anything to it. + def join_fn(x: str, y: str) -> str: + return ".".join([x, y] if y else [x]) + + # Progressively collect all the names of intermediate + # modules. For example, if we have the target + # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and + # `foo.bar.baz` to the list.
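+                # e.g. (illustrative) itertools.accumulate(["foo", "bar", "baz"], join_fn) + #      yields "foo", "foo.bar", "foo.bar.baz"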
for path in itertools.accumulate(fullpath, join_fn): + used.append(path) + + # For a `call_module` node, also register all recursive submodules + # as used + if node.op == "call_module": + try: + submod = self.get_submodule(node.target) + + for submod_name, _ in submod.named_modules(): + if submod_name != "": + used.append(".".join([node.target, submod_name])) + except AttributeError: + # Node referenced nonexistent submodule, don't need to + # worry about GCing anything + pass + + to_delete = [name for name, _ in self.named_modules() if name not in used] + + for name in to_delete: + self.delete_submodule(name) + + @property + def code(self) -> str: + """ + Return the Python code generated from the ``Graph`` underlying this + ``GraphModule``. + """ + if not hasattr(self, "_code"): + raise RuntimeError( + "Code has not been generated! Please report a bug to PyTorch" + ) + return self._code + + @compatibility(is_backward_compatible=True) + def recompile(self) -> PythonCode: + """ + Recompile this GraphModule from its ``graph`` attribute. This should be + called after editing the contained ``graph``, otherwise the generated + code of this ``GraphModule`` will be out of date. + """ + if isinstance(self._graph._codegen, _PyTreeCodeGen): + self._in_spec = self._graph._codegen.pytree_info.in_spec + self._out_spec = self._graph._codegen.pytree_info.out_spec + python_code = self._graph.python_code(root_module="self") + self._code = python_code.src + self._lineno_map = python_code._lineno_map + + cls = type(self) + co_fields = self._graph._co_fields if hasattr(self._graph, "_co_fields") else {} + cls.forward = _forward_from_src(self._code, python_code.globals, co_fields) + + # Determine whether this class explicitly defines a __call__ implementation + # to wrap. If it does, save it in order to have wrapped_call invoke it. + # If it does not, wrapped_call can use a dynamic call to super() instead. + # In most cases, super().__call__ should be torch.nn.Module.__call__. + # We do not want to hold a reference to Module.__call__ here; doing so will + # bypass patching of torch.nn.Module.__call__ done while symbolic tracing. + cls_call = cls.__call__ if "__call__" in vars(cls) else None + + if "_wrapped_call" not in vars(cls): + cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined] + + def call_wrapped(self, *args, **kwargs): + return self._wrapped_call(self, *args, **kwargs) + + cls.__call__ = call_wrapped # type: ignore[method-assign] + + return python_code
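+    # Typical recompile() flow (a sketch; `my_module` is a hypothetical nn.Module): + # + #     gm = torch.fx.symbolic_trace(my_module) + #     # ... mutate gm.graph in place ... + #     gm.recompile()  # regenerate gm.code and gm.forward from the edited graph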
+ # Passing Tracer as argument allows subclasses extending fx.GraphModule + # to define their own Tracer (extending fx.Tracer). + def __reduce_deploy__(self, importer: Importer): + dict_without_graph = self.__dict__.copy() + dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__ + del dict_without_graph["_graph"] + + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, importer) + return (reduce_deploy_graph_module, (dict_without_graph, import_block)) + + def __reduce_package__(self, exporter: PackageExporter): + dict_without_graph = self.__dict__.copy() + dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__ + del dict_without_graph["_graph"] + + generated_module_name = f"fx-generated._{exporter.get_unique_id()}" + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, exporter.importer) + module_code = import_block + self.code + exporter.save_source_string(generated_module_name, module_code) + return ( + reduce_package_graph_module, + (dict_without_graph, generated_module_name), + ) + + def __reduce__(self): + """ + Serialization of GraphModule. We serialize only the generated code, not + the underlying ``Graph``. This is because ``Graph`` does not have on-disk + backward-compatibility guarantees, whereas Python source code does. + On the deserialization side, we symbolically trace through the generated + code to regenerate the underlying ``Graph`` + """ + dict_without_graph = self.__dict__.copy() + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, sys_importer) + del dict_without_graph["_graph"] + return (reduce_graph_module, (dict_without_graph, import_block)) + + def _deepcopy_init(self): + return GraphModule.__init__ + + # because __reduce__ is defined for serialization, + # we need to define deepcopy as well, otherwise copying will call __reduce__ + # and cause symbolic tracing to occur every time we try to copy the object + def __deepcopy__(self, memo): + res = type(self).__new__(type(self)) + memo[id(self)] = res + fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo)) + self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["_graph"]) + # hooks are lost during `GraphModule.__init__`, so we need to copy them + # over explicitly. Note that right now we only copy state_dict-related + # hooks, to reduce BC-related issues; we can copy forward/backward-related + # hooks in the future as well if needed + extra_preserved_attrs = [ + "_state_dict_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", + ] + for attr in extra_preserved_attrs: + if attr in self.__dict__: + setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo)) + res.meta = copy.deepcopy(getattr(self, "meta", {}), memo) + if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta: + for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items(): + setattr(res, attr_name, attr) + return res + + def __copy__(self): + res = GraphModule(self, self.graph) + res.meta = getattr(self, "meta", {}) + return res + + @compatibility(is_backward_compatible=False) + def print_readable(self, print_output=True): + """ + Return the Python code generated for the current GraphModule and its children GraphModules + """ + verbose_python_code = self._graph.python_code(root_module="self", verbose=True) + module_code = verbose_python_code.src + module_code = module_code.lstrip("\n") + module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code + module_code = _addindent(module_code, 4) + + submodule_code_list = [""] + for submodule in self.children(): + if isinstance(submodule, GraphModule): +
submodule_code_list.append(submodule.print_readable(print_output=False)) + submodule_code = "\n".join(submodule_code_list) + submodule_code = _addindent(submodule_code, 4) + + output = module_code + submodule_code + if print_output: + print(module_code + submodule_code) + return output + + def __str__(self) -> str: + orig_str = super().__str__() + print_readable_reminder = ( + "# To see more debug info, please use `graph_module.print_readable()`" + ) + return "\n".join([orig_str, self._code, print_readable_reminder]) + + def _replicate_for_data_parallel(self): + new_gm = self.__copy__() + new_gm._is_replica = True + return new_gm + + +# workarounds for issues in __torch_function__ + +# WAR for __torch_function__ not handling tensor lists, +# fix is in https://github.com/pytorch/pytorch/pull/34725 +# orig_cat = torch.cat +# def patched_cat(*args, **kwargs): +# tensors = args[0] +# for t in tensors: +# if isinstance(t, Proxy): +# return t.__torch_function__(patched_cat, (), args, kwargs) +# return orig_cat(*args, **kwargs) +# patched_cat.__module__ = 'torch' +# patched_cat.__name__ = 'cat' +# torch.cat = patched_cat diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/fx/immutable_collections.py b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/immutable_collections.py new file mode 100644 index 0000000000000000000000000000000000000000..a359335f6ecedaff9eca88b8a1f726573fe2e5b1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/immutable_collections.py @@ -0,0 +1,54 @@ +from typing import Any, Dict, Iterable, List, Tuple + +from ._compatibility import compatibility +from torch.utils._pytree import Context, register_pytree_node + +__all__ = ["immutable_list", "immutable_dict"] + +_help_mutation = """\ +If you are attempting to modify the kwargs or args of a torch.fx.Node object, +instead create a new copy of it and assign the copy to the node: + new_args = ... # copy and mutate args + node.args = new_args +""" + +def _no_mutation(self, *args, **kwargs): + raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation. 
{_help_mutation}") + +def _create_immutable_container(base, mutable_functions): + container = type('immutable_' + base.__name__, (base,), {}) + for attr in mutable_functions: + setattr(container, attr, _no_mutation) + return container + +immutable_list = _create_immutable_container(list, + ['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append', + 'clear', 'extend', 'insert', 'pop', 'remove']) +immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),)) +immutable_list.__hash__ = lambda self: hash(tuple(self)) + +compatibility(is_backward_compatible=True)(immutable_list) + +immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update']) +immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),)) +immutable_dict.__hash__ = lambda self: hash(tuple(self.items())) +compatibility(is_backward_compatible=True)(immutable_dict) + + +# Register immutable collections for PyTree operations + +def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]: + return list(d.values()), list(d.keys()) + +def _immutable_dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]: + return immutable_dict(dict(zip(context, values))) + +def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]: + return d, None + +def _immutable_list_unflatten(values: Iterable[Any], context: Context) -> List[Any]: + return immutable_list(values) + + +register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten) +register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/fx/operator_schemas.py b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/operator_schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..3e796ba65fc7813f4c210d7a196fe53f89b1e286 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/fx/operator_schemas.py @@ -0,0 +1,440 @@ +import torch +import inspect +import numbers +import types +import typing +import enum +import warnings +from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING +from torch._jit_internal import boolean_dispatched +from ._compatibility import compatibility +from torch._ops import OpOverloadPacket, OpOverload + +if TYPE_CHECKING: + from .node import Argument + +__all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint", + "type_matches", "normalize_function", "normalize_module"] + +@compatibility(is_backward_compatible=False) +class ArgsKwargsPair(NamedTuple): + """ + Simple named tuple for wrapping args/kwargs pairs. + """ + args: Tuple[Any, ...] 
+ kwargs: Dict[str, Any] + +_manual_overrides : Dict[Callable, List[inspect.Signature]] = {} + +def _nonzero_schemas(): + signatures = [] + + def nonzero(self): + pass + signatures.append(inspect.signature(nonzero)) + + def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef] + pass + signatures.append(inspect.signature(nonzero)) + + return signatures + +_manual_overrides[torch.nonzero] = _nonzero_schemas() + +class _FakeGlobalNamespace: + def __getattr__(self, name): + if name == 'torch': + return torch + raise RuntimeError('Expected a torch namespace lookup') + +_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout, + 'number' : numbers.Number, 'Future' : torch.jit.Future, + 'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme, + '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None), + 't': typing.TypeVar('t')} +for k in dir(typing): + _type_eval_globals[k] = getattr(typing, k) + +def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any: + """ + Convert a TorchScript type to a Python type (including subtypes) via + eval'ing the annotation_str. _type_eval_globals sets up expressions + like "List" and "Future" to map to actual types (typing.List and jit.Future) + """ + return eval(ts_type.annotation_str, _type_eval_globals) + +def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + from inspect import Parameter + parameters : List[Parameter] = [] + for arg in ts_schema.arguments: + arg_type = _torchscript_type_to_python_type(arg.type) + default = arg.default_value if arg.has_default_value() else Parameter.empty + # TODO: Figure out if this is safe. It seems like when generating the type signatures for + # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor + # argument name. 
Downstream, if someone converts that positional argument to a keyword + # argument, the name mismatch will break things, so here we're going to normalize the + # name to "input" + name = arg.name if arg.name != 'self' else 'input' + kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD + # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument + if name == "from": + assert kind == Parameter.POSITIONAL_OR_KEYWORD + # ParameterKind type is an internal implementation detail of the inspect + # package, which makes it hard to do type annotation + kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment] + # This renders all previous arguments positional-only + for idx, p in enumerate(parameters): + assert p.kind == Parameter.POSITIONAL_OR_KEYWORD + parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation) + parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type)) + return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns] + if len(return_types) == 0: + return_type = None + elif len(return_types) == 1: + return_type = return_types[0] + else: + return_type = tuple(return_types) + + return inspect.Signature(parameters, return_annotation=return_type) + +_SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {} + +def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + # Cached as it's called in the hot path of FakeTensor dispatch + cache_key = ts_schema.name, ts_schema.overload_name + cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key) + if cache_val is not None: + return cache_val + + res = _torchscript_schema_to_signature_impl(ts_schema) + _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res + return res + +@compatibility(is_backward_compatible=False) +def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']): + signatures, schemas = get_signature_for_torch_op(target, return_schemas=True) + + if signatures and schemas: + matched_schemas = [] + + # Iterate through all of the schemas until we find one that matches. + # If one matches, populate `new_args_and_kwargs` with the new args/kwargs + # values. If none matches, `new_args_and_kwargs` will be None + for candidate_signature, schema in zip(signatures, schemas): + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append((candidate_signature, schema)) + except TypeError as e: + continue + + def throw_if_mutable(schema): + if schema.is_mutable: + raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional ' + f'code, so operations that mutate operands in-place (e.g. via `out` arguments) ' + f'are not supported') + + if len(matched_schemas) == 0: + # Did not match any schema. Cannot check for mutation + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + _, schema_to_check = matched_schemas[0] + throw_if_mutable(schema_to_check) + pass + else: + # Ambiguous schema match. Since mutability checking is best effort, + # do nothing. + pass + +@compatibility(is_backward_compatible=False) +def get_signature_for_torch_op(op : Callable, return_schemas : bool = False): + """ + Given an operator on the `torch` namespace, return a list of `inspect.Signature` + objects corresponding to the overloads of that op. May return `None` if a signature + could not be retrieved.
+ + Args: + op (Callable): An operator on the `torch` namespace to look up a signature for + + Returns: + Optional[List[inspect.Signature]]: A list of signatures for the overloads of this + operator, or None if the operator signatures could not be retrieved. If + return_schemas=True, returns a tuple containing the optional Python signatures + and the optional TorchScript Function signature + """ + if isinstance(op, OpOverload): + schemas = [op._schema] + elif isinstance(op, OpOverloadPacket): + schemas = [getattr(op, overload)._schema for overload in op.overloads()] + else: + override = _manual_overrides.get(op) + if override: + return (override, None) if return_schemas else None + + aten_fn = torch.jit._builtins._find_builtin(op) + + if aten_fn is None: + return (None, None) if return_schemas else None + schemas = torch._C._jit_get_schemas_for_operator(aten_fn) + + signatures = [_torchscript_schema_to_signature(schema) for schema in schemas] + return (signatures, schemas) if return_schemas else signatures + +@compatibility(is_backward_compatible=False) +def create_type_hint(x): + try: + if isinstance(x, (list, tuple)): + # todo(chilli): Figure out the right way for mypy to handle this + if isinstance(x, list): + def ret_type(x): + return List[x] # type: ignore[valid-type] + else: + def ret_type(x): + return Tuple[x, ...] + if len(x) == 0: + return ret_type(Any) + base_type = x[0] + for t in x: + if issubclass(t, base_type): + continue + elif issubclass(base_type, t): + base_type = t + else: + return ret_type(Any) + return ret_type(base_type) + except Exception as e: + # We tried to create a type hint for list but failed. + warnings.warn(f"We were not able to successfully create type hint from the type {x}") + pass + return x + +@compatibility(is_backward_compatible=False) +def type_matches(signature_type : Any, argument_type : Any): + sig_origin_type = getattr(signature_type, '__origin__', signature_type) + + if signature_type is argument_type: + return True + + # Union types in signature. Given type needs to match one of the + # contained types in the Union + if sig_origin_type is typing.Union and signature_type != argument_type: + sig_contained = signature_type.__args__ + return any(type_matches(c, argument_type) for c in sig_contained) + + if signature_type is List[int] and argument_type is int: + # int can be promoted to List[int] + return True + + if getattr(signature_type, '__origin__', None) in {list, List}: + sig_el_type = signature_type.__args__[0] + if not inspect.isclass(sig_el_type): + warnings.warn( + f"Does not support nested parametric types, got {signature_type}. 
Please file a bug.") + return False + if getattr(argument_type, '__origin__', None) in {list, List}: + return issubclass(argument_type.__args__[0], sig_el_type) + + def is_homogeneous_tuple(t): + if getattr(t, "__origin__", None) not in {tuple, Tuple}: + return False + contained = t.__args__ + if t.__args__ == ((),): # Tuple[()].__args__ == ((),) for some reason + return True + return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained) + + # Tuple[T] is accepted for List[T] parameters + return is_homogeneous_tuple(argument_type) + + # Dtype is an int in schemas + if signature_type is int and argument_type is torch.dtype: + return True + + if signature_type is numbers.Number and argument_type in {int, float}: + return True + if inspect.isclass(argument_type) and inspect.isclass(signature_type): + return issubclass(argument_type, signature_type) + + return False + +@compatibility(is_backward_compatible=False) +def normalize_function( + target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None, + kwarg_types : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch functions. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. Does not support positional-only + parameters or varargs parameters (*args, **kwargs). Does not support modules. + + May require `arg_types` and `kwarg_types` in order to disambiguate overloads. + + Args: + target (Callable): Function that we are normalizing + args (Tuple[Any]): Tuple of args to the function + kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function + arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args + kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns normalized_args_and_kwargs, or `None` if not successful. + """ + if kwargs is None: + kwargs = {} + new_args_and_kwargs = None + if not isinstance(target, types.BuiltinFunctionType) and not ( + isinstance(target, (OpOverloadPacket, OpOverload)) + ): + target_for_analysis = target + if target in boolean_dispatched: + # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have + # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false` + # branches of the dispatch have exactly the same signature. If they do, use the `true` + # branch signature for analysis. Otherwise, leave this un-normalized + assert not isinstance(target, str) + dispatched = boolean_dispatched[target] + if_true, if_false = dispatched['if_true'], dispatched['if_false'] + if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters: + return None + target_for_analysis = if_true + + assert callable(target_for_analysis) + sig = inspect.signature(inspect.unwrap(target_for_analysis)) + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs) + else: + assert callable(target) + torch_op_schemas = get_signature_for_torch_op(target) + matched_schemas = [] + if torch_op_schemas: + # Iterate through all of the schema until we find one that matches + # If one matches, populate `new_args_and_kwargs` with the new args/kwargs + # values. 
If none matches, `new_args_and_kwargs` will be None + for candidate_signature in torch_op_schemas: + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append(candidate_signature) + except TypeError as e: + continue + + if len(matched_schemas) == 0: + # Did not match any schema. Cannot normalize + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs, + normalize_to_only_use_kwargs) + else: + if arg_types is not None or kwarg_types is not None: + arg_types = arg_types if arg_types else cast(Tuple[Any], ()) + kwarg_types = kwarg_types if kwarg_types else {} + for candidate_signature in torch_op_schemas: + sig_matches = True + try: + bound_types = candidate_signature.bind(*arg_types, **kwarg_types) + for arg_name, arg_type in bound_types.arguments.items(): + param = candidate_signature.parameters[arg_name] + sig_matches = sig_matches and type_matches(param.annotation, arg_type) + except TypeError as e: + sig_matches = False + if sig_matches: + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs, + normalize_to_only_use_kwargs) + break + else: + # Matched more than one schema. In this situation, the caller must provide the types of + # the arguments of the overload they expect. + schema_printouts = '\n'.join(str(schema) for schema in matched_schemas) + raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but ' + f'the schema match was ambiguous! Please provide argument types to ' + f'the normalize_arguments() call. Available schemas:\n{schema_printouts}') + + return new_args_and_kwargs + +@compatibility(is_backward_compatible=False) +def normalize_module( + root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch modules. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. Does not support positional-only + parameters or varargs parameters (*args, **kwargs). + + Args: + root (nn.Module): root module upon which we query modules + target (Callable): Function that we are normalizing + args (Tuple[Any]): Tuple of args to the function + kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns normalized_args_and_kwargs, or `None` if not successful. 
+ """ + try: + submod = root.get_submodule(target) + except AttributeError as e: + raise RuntimeError(f"Tried to normalize node with target {target} but root did not " + f"have that target!") from e + if hasattr(submod.__class__, '__name__'): + classname = submod.__class__.__name__ + if getattr(torch.nn, classname, None) == submod.__class__: + sig = inspect.signature(inspect.unwrap(submod.forward)) + if kwargs is None: + kwargs = {} + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, + normalize_to_only_use_kwargs) + return new_args_and_kwargs + return None + +def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...], + kwargs : Dict[str, Any], + normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]: + """ + Given a call target, args, and kwargs, return the arguments normalized into + an ArgsKwargsPair, or None if the type signature is not supported by + this normalization. + + Args: + + sig (inspect.Signature): Signature object for the target + args (Tuple): Arguments that appear at the callsite for `target` + kwargs (Dict): Keyword arguments that appear at the callsite for `target` + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if + this target is not supported. + """ + + # Don't currently support positional-only + # or varargs (*args, **kwargs) signatures + supported_parameter_types = { + inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY} + if any(p.kind not in supported_parameter_types for p in sig.parameters.values()): + # Add an exception for one signature, which is common for random/uniform, i.e.: + # Tensor(a!) self, float from=0, float to=1, *, Generator? 
generator=None + # `from` is Python keyword and as such functions with that signature should have + # positional-only args, but at the same time they could be dispatched as kwargs + if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']: + return None + + bound_args = sig.bind(*args, **kwargs) + bound_args.apply_defaults() + + new_kwargs : Dict[str, Any] = {} + new_args : List[Any] = [] + for i, param in enumerate(sig.parameters): + if not normalize_to_only_use_kwargs and i < len(args): + new_args.append(bound_args.arguments[param]) + else: + new_kwargs[param] = bound_args.arguments[param] + + return ArgsKwargsPair(tuple(new_args), new_kwargs) diff --git a/evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b8beb5ad2490713462058faa693b9173e88f7b5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b34ef3bac735c84b15d0cb3df24157a758f9caa3d50e29490e1cbe6d2a9bc6a7 +size 435665 diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c new file mode 100644 index 0000000000000000000000000000000000000000..6bc9022a58d3cd087d167d354224ded89be91884 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c @@ -0,0 +1,27 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + /* MAXMIN */ + int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0); + ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0); +#ifdef __aarch64__ + { + double *src2 = (double*)argv[argc-1]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + /* MAXMIN */ + ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0); + ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0); + } +#endif + return ret; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c new file mode 100644 index 0000000000000000000000000000000000000000..e7068ce02e19856349873f40d03caff438efb6fe --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c @@ -0,0 +1,16 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + unsigned char *src = (unsigned char*)argv[argc-1]; + uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]); + uint32x4_t va = vdupq_n_u32(3); + int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0); +#ifdef __aarch64__ + ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0); +#endif + return ret; +}
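The normalization helpers above are easiest to see end to end with a small example. A minimal sketch, assuming a torch build that exposes normalize_function from torch.fx.operator_schemas in the form shown above (F.relu is just a convenient non-overloaded target):

import torch
import torch.nn.functional as F
from torch.fx.operator_schemas import normalize_function

t = torch.randn(3)
# Match positional args to the signature and populate default values.
pair = normalize_function(F.relu, (t,), normalize_to_only_use_kwargs=True)
if pair is not None:    # None means the target could not be normalized
    print(pair.args)    # ()
    print(pair.kwargs)  # {'input': tensor(...), 'inplace': False}

For overloaded builtin ops (e.g. torch.add), the caller may need to pass arg_types/kwarg_types to pick one schema, as the docstring notes.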
diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c new file mode 100644 index 0000000000000000000000000000000000000000..54e328098d17b57445024c9859cd4992492c348a --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float *src2 = (float*)argv[argc-2]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + float32x4_t vf = vdupq_n_f32(src2[0]); + float32x2_t vlf = vdup_n_f32(src2[1]); + + int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0); + ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0); + + return ret; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c new file mode 100644 index 0000000000000000000000000000000000000000..e2de0306e0acaeda3b861756e598a132f8e1ca9f --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c @@ -0,0 +1,15 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + + int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0); + ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0); + return ret; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c new file mode 100644 index 0000000000000000000000000000000000000000..26ae18466740b230f9b964ebb4c72c54f13c73ee --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX__ + #error "HOST/ARCH doesn't support AVX" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1])); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c new file mode 100644 index 0000000000000000000000000000000000000000..ddde868f1b586c7b066c2284556b65ec5fef834e --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #ifndef __AVX2__ + #error "HOST/ARCH doesn't support AVX2" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c new file mode 100644 index 0000000000000000000000000000000000000000..81edcd06700518269420f0cf6192e552581c17d8 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512VNNI__ + #error "HOST/ARCH doesn't support CascadeLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + /* VNNI */ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c new file mode 100644 index 0000000000000000000000000000000000000000..5799f122b511420eb16d066c31dc218bc4fae110 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c @@ -0,0 +1,24 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__) + #error "HOST/ARCH doesn't support CannonLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* IFMA */ + a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512()); + /* VBMI */ + a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c new file mode 100644 index 0000000000000000000000000000000000000000..3cf44d73164b6a80eca5f23f699bd00dba1f623e --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support IceLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* VBMI2 */ + a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512()); + /* BITALG */ + a = _mm512_popcnt_epi8(a); + /* VPOPCNTDQ */ + a = _mm512_popcnt_epi64(a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c new file mode 100644 index 0000000000000000000000000000000000000000..cb55e57aa220ebc8e1b638f7bfb470cff6725ea2 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c @@ -0,0 +1,25 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512ER__) || !defined(__AVX512PF__) + #error "HOST/ARCH doesn't support Knights Landing AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + int base[128]={}; + __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]); + /* ER */ + __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad)); + /* PF */ + _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1); + return base[0]; +}
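Each of these cpu_*.c files is a stand-alone probe: the feature counts as available only when the file compiles and links under the candidate flags (and, for the Intel compiler, only when the guarding feature macros above are also defined). A rough sketch of how such a probe can be driven; the compiler name and flag below are assumptions for illustration, not part of the vendored sources:

import os, shutil, subprocess, tempfile

def feature_compiles(flag: str, check_src: str) -> bool:
    # The probe passes only if the intrinsics in check_src compile AND link.
    cc = shutil.which("cc") or "gcc"
    with tempfile.TemporaryDirectory() as tmp:
        out = os.path.join(tmp, "probe.out")
        res = subprocess.run([cc, flag, check_src, "-o", out],
                             capture_output=True)
        return res.returncode == 0

# e.g. feature_compiles("-mavx512f", "cpu_avx512f.c")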
diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c new file mode 100644 index 0000000000000000000000000000000000000000..2c426462bd34e00f9a0b04e01fb124784c2afb7b --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c @@ -0,0 +1,30 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support Knights Mill AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]); + + /* 4FMAPS */ + b = _mm512_4fmadd_ps(b, b, b, b, b, NULL); + /* 4VNNIW */ + a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL); + /* VPOPCNTDQ */ + a = _mm512_popcnt_epi64(a); + + a = _mm512_add_epi32(a, _mm512_castps_si512(b)); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c new file mode 100644 index 0000000000000000000000000000000000000000..8840efb7e5eefcb762b69bf8d40b79406f6798a5 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error "HOST/ARCH doesn't support SkyLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + /* VL */ + __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1)); + /* DQ */ + __m512i b = _mm512_broadcast_i32x8(a); + /* BW */ + b = _mm512_abs_epi16(b); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(b)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_spr.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_spr.c new file mode 100644 index 0000000000000000000000000000000000000000..9710d0b2fe2f2ac1fc9e19c1c9b4688807efd6d7 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_spr.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512FP16__) + #error "HOST/ARCH doesn't support Sapphire Rapids AVX512FP16 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ +/* clang has a bug regarding our spr code, see gh-23730.
*/ +#if __clang__ +#error +#endif + __m512h a = _mm512_loadu_ph((void*)argv[argc-1]); + __m512h temp = _mm512_fmadd_ph(a, a, a); + _mm512_storeu_ph((void*)(argv[argc-1]), temp); + return 0; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c new file mode 100644 index 0000000000000000000000000000000000000000..5e29c79e34a73bdfbbcc2571333bfdd28007e07f --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512CD__ + #error "HOST/ARCH doesn't support AVX512CD" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c new file mode 100644 index 0000000000000000000000000000000000000000..d0eb7b1ad5c63995a995c8fe80f59fd8131538d1 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512F__ + #error "HOST/ARCH doesn't support AVX512F" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c new file mode 100644 index 0000000000000000000000000000000000000000..fdf36cec580ce9c24fbb9d2a60fdfcaa824b3f11 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #ifndef __F16C__ + #error "HOST/ARCH doesn't support F16C" + #endif +#endif + +#include <emmintrin.h> +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1])); + __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2])); + return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8))); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c new file mode 100644 index 0000000000000000000000000000000000000000..bfeef22b5f0e86becd6b9f7a8b5b0f4bdea73202 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__FMA__) && !defined(__AVX2__) + #error "HOST/ARCH doesn't support FMA3" + #endif +#endif + +#include <xmmintrin.h> +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_fmadd_ps(a, a, a); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c new file mode 100644 index 0000000000000000000000000000000000000000..0ff17a483385bec07f9aef023b16fc331e66fb6f --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c @@ -0,0 +1,13 @@ +#include <immintrin.h> +#ifdef _MSC_VER + #include <ammintrin.h> +#else + #include <x86intrin.h> +#endif + +int main(int argc, char **argv) +{ + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_macc_ps(a, a, a); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c new file mode 100644 index 0000000000000000000000000000000000000000..8c64f864dea63cb9c4ee60249e52b1ad528751c7 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + // passing from untraced pointers to avoid optimizing out any constants + // so we can test against the linker.
+ float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0); +#ifdef __aarch64__ + double *src2 = (double*)argv[argc-2]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0); +#endif + return ret; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c new file mode 100644 index 0000000000000000000000000000000000000000..f3b949770db66a03a6221a230e75e87f67359759 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c @@ -0,0 +1,11 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + short *src = (short*)argv[argc-1]; + float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src)); + return (int)vgetq_lane_f32(v_z4, 0); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c new file mode 100644 index 0000000000000000000000000000000000000000..a039159ddeed006d62f07250a3a1dbb5abfcb6ac --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c @@ -0,0 +1,21 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]); + float32x4_t v2 = vdupq_n_f32(src[1]); + float32x4_t v3 = vdupq_n_f32(src[2]); + int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0); +#ifdef __aarch64__ + double *src2 = (double*)argv[argc-2]; + float64x2_t vd1 = vdupq_n_f64(src2[0]); + float64x2_t vd2 = vdupq_n_f64(src2[1]); + float64x2_t vd3 = vdupq_n_f64(src2[2]); + ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0); +#endif + return ret; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c new file mode 100644 index 0000000000000000000000000000000000000000..813c461f05b36b52c855f31d621a23ab7ee0c642 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c @@ -0,0 +1,32 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #if !defined(__SSE4_2__) && !defined(__POPCNT__) + #error "HOST/ARCH doesn't support POPCNT" + #endif +#endif + +#ifdef _MSC_VER + #include <nmmintrin.h> +#else + #include <popcntintrin.h> +#endif + +int main(int argc, char **argv) +{ + // To make sure popcnt instructions are generated + // and tested against the assembler + unsigned long long a = *((unsigned long long*)argv[argc-1]); + unsigned int b = *((unsigned int*)argv[argc-2]); + +#if defined(_M_X64) || defined(__x86_64__) + a = _mm_popcnt_u64(a); +#endif + b = _mm_popcnt_u32(b); + return (int)a + b; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c new file mode 100644 index 0000000000000000000000000000000000000000..602b74e7bc437ee4fdfbc375280f423700caa49e --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE__ + #error "HOST/ARCH doesn't support SSE" + #endif +#endif + +#include <xmmintrin.h> + +int main(void) +{ + __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c new file mode 100644 index 0000000000000000000000000000000000000000..33826a9ed1a53ef27e9c686d870d31d4b12f1736 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE2__ + #error "HOST/ARCH doesn't support SSE2" + #endif +#endif + +#include <emmintrin.h> + +int main(void) +{ + __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128()); + return _mm_cvtsi128_si32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c new file mode 100644 index 0000000000000000000000000000000000000000..d47c20f74be1f83afd1962917438507c609e5413 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #ifndef __SSE3__ + #error "HOST/ARCH doesn't support SSE3" + #endif +#endif + +#include <pmmintrin.h> + +int main(void) +{ + __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c new file mode 100644 index 0000000000000000000000000000000000000000..7c80238a3bc1809cdec133c057b1bf0ff46ce64e --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_1__ + #error "HOST/ARCH doesn't support SSE41" + #endif +#endif + +#include <smmintrin.h> + +int main(void) +{ + __m128 a = _mm_floor_ps(_mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c new file mode 100644 index 0000000000000000000000000000000000000000..f60e18f3c4f13d58bc9e8ac84752612b5ad11830 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_2__ + #error "HOST/ARCH doesn't support SSE42" + #endif +#endif + +#include <smmintrin.h> + +int main(void) +{ + __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c new file mode 100644 index 0000000000000000000000000000000000000000..fde390d6a37d3e2c929b7a6841efa42e618742e5 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #ifndef __SSSE3__ + #error "HOST/ARCH doesn't support SSSE3" + #endif +#endif + +#include <tmmintrin.h> + +int main(void) +{ + __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128()); + return (int)_mm_cvtsi128_si32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c new file mode 100644 index 0000000000000000000000000000000000000000..410fb29d6db5abab4c6b2a99308f99cce07c10b2 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c @@ -0,0 +1,13 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned long long v_uint64x2; + +int main(void) +{ + v_uint64x2 z2 = (v_uint64x2){0, 0}; + z2 = (v_uint64x2)vec_cmpeq(z2, z2); + return (int)vec_extract(z2, 0); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c new file mode 100644 index 0000000000000000000000000000000000000000..857526535aa8ff728d8ccd055d766bf4581c6eed --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c @@ -0,0 +1,13 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned int v_uint32x4; + +int main(void) +{ + v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0}; + z4 = vec_absd(z4, z4); + return (int)vec_extract(z4, 0); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c new file mode 100644 index 0000000000000000000000000000000000000000..a6acc7384dd95f7ef51d17c85492342dde353d0d --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c @@ -0,0 +1,14 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned int v_uint32x4; + +int main(void) +{ + v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16}; + v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2}; + v_uint32x4 v3 = vec_mod(v1, v2); + return (int)vec_extractm(v3); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c new file mode 100644 index 0000000000000000000000000000000000000000..18fb7ef94a248d0de890bafa9cae67a5559e47f9 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c @@ -0,0 +1,16 @@ +#if (__VEC__ < 10301) || (__ARCH__ < 11) + #error VX not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector double x = vec_abs(vec_xl(argc, (double*)argv)); + __vector double y = vec_load_len((double*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool long long m = vec_cmpge(x, y); + __vector long long i = vec_signed(vec_sel(x, y, m)); + + return (int)vec_extract(i, 0); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe.c new file mode 100644 index 0000000000000000000000000000000000000000..e6933adce3d014d159e93d05166e5b67a0104efe --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe.c @@ -0,0 +1,25 @@ +#if (__VEC__ < 10302) || (__ARCH__ < 12) + #error VXE not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector float x = vec_nabs(vec_xl(argc, (float*)argv)); + __vector float y = vec_load_len((float*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool int m = vec_cmpge(x, y); + x = vec_sel(x, y, m); + + // need to test the existence of the intrin "vflls" since vec_doublee + // maps to the wrong intrin "vfll". + // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871 +#if defined(__GNUC__) && !defined(__clang__) + __vector long long i = vec_signed(__builtin_s390_vflls(x)); +#else + __vector long long i = vec_signed(vec_doublee(x)); +#endif + + return (int)vec_extract(i, 0); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c new file mode 100644 index 0000000000000000000000000000000000000000..f36d57129af67f111fa9dccca55f76dc52e6001d --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c @@ -0,0 +1,21 @@ +#if (__VEC__ < 10303) || (__ARCH__ < 13) + #error VXE2 not supported +#endif + +#include <vecintrin.h> + +int main(int argc, char **argv) +{ + int val; + __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' }; + __vector signed short search = { 'g', 'h', 'g', 'o' }; + __vector unsigned char len = { 0 }; + __vector unsigned char res = vec_search_string_cc(large, search, len, &val); + __vector float x = vec_xl(argc, (float*)argv); + __vector int i = vec_signed(x); + + i = vec_srdb(vec_sldb(i, i, 2), i, 3); + val += (int)vec_extract(res, 1); + val += vec_extract(i, 0); + return val; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_xop.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_xop.c new file mode 100644 index 0000000000000000000000000000000000000000..51d70cf2b6d85eae5be6bd08625dbff865530f84 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/cpu_xop.c @@ -0,0 +1,12 @@ +#include <immintrin.h> +#ifdef _MSC_VER + #include <ammintrin.h> +#else + #include <x86intrin.h> +#endif + +int main(void) +{ + __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128()); + return _mm_cvtsi128_si32(a); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c new file mode 100644 index 0000000000000000000000000000000000000000..9cfd0c2a57f355cea353abd24d21343543017191 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c @@ -0,0 +1,18 @@ +#include <immintrin.h> +/** + * Test BW mask operations due to: + * - MSVC has supported it since vs2019, see + * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html + * - Clang >= v8.0 + * - GCC >= v7.1 + */ +int main(void) +{ + __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1)); + m64 = _kor_mask64(m64, m64); + m64 = _kxor_mask64(m64, m64); + m64 = _cvtu64_mask64(_cvtmask64_u64(m64)); + m64 = _mm512_kunpackd(m64, m64); + m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64); + return (int)_cvtmask64_u64(m64); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c new file mode 100644 index 0000000000000000000000000000000000000000..f0dc88bdd3724189dcc7cb91402db6067f8330be --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c @@ -0,0 +1,16 @@ +#include <immintrin.h> +/** + * Test DQ mask operations due to: + * - MSVC has supported it since
vs2019, see + * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html + * - Clang >= v8.0 + * - GCC >= v7.1 + */ +int main(void) +{ + __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1)); + m8 = _kor_mask8(m8, m8); + m8 = _kxor_mask8(m8, m8); + m8 = _cvtu32_mask8(_cvtmask8_u32(m8)); + return (int)_cvtmask8_u32(m8); +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c new file mode 100644 index 0000000000000000000000000000000000000000..db01aaeef40570139d5df0f2f2a9e91e26f97f74 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c @@ -0,0 +1,41 @@ +#include <immintrin.h> +/** + * The following intrinsics don't have direct native support but compilers + * tend to emulate them. + * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19 + */ +int main(void) +{ + __m512 one_ps = _mm512_set1_ps(1.0f); + __m512d one_pd = _mm512_set1_pd(1.0); + __m512i one_i64 = _mm512_set1_epi64(1); + // add + float sum_ps = _mm512_reduce_add_ps(one_ps); + double sum_pd = _mm512_reduce_add_pd(one_pd); + int sum_int = (int)_mm512_reduce_add_epi64(one_i64); + sum_int += (int)_mm512_reduce_add_epi32(one_i64); + // mul + sum_ps += _mm512_reduce_mul_ps(one_ps); + sum_pd += _mm512_reduce_mul_pd(one_pd); + sum_int += (int)_mm512_reduce_mul_epi64(one_i64); + sum_int += (int)_mm512_reduce_mul_epi32(one_i64); + // min + sum_ps += _mm512_reduce_min_ps(one_ps); + sum_pd += _mm512_reduce_min_pd(one_pd); + sum_int += (int)_mm512_reduce_min_epi32(one_i64); + sum_int += (int)_mm512_reduce_min_epu32(one_i64); + sum_int += (int)_mm512_reduce_min_epi64(one_i64); + // max + sum_ps += _mm512_reduce_max_ps(one_ps); + sum_pd += _mm512_reduce_max_pd(one_pd); + sum_int += (int)_mm512_reduce_max_epi32(one_i64); + sum_int += (int)_mm512_reduce_max_epu32(one_i64); + sum_int += (int)_mm512_reduce_max_epi64(one_i64); + // and + sum_int += (int)_mm512_reduce_and_epi32(one_i64); + sum_int += (int)_mm512_reduce_and_epi64(one_i64); + // or + sum_int += (int)_mm512_reduce_or_epi32(one_i64); + sum_int += (int)_mm512_reduce_or_epi64(one_i64); + return (int)sum_ps + (int)sum_pd + sum_int; +}
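The outcome of these build-time probes is recorded by NumPy itself, so the chosen baseline and the runtime-dispatched features can be inspected from Python. A hedged sketch, assuming the attribute names used by recent NumPy builds:

import numpy.core._multiarray_umath as um
# Features unconditionally compiled in (the baseline).
print(getattr(um, "__cpu_baseline__", None))
# Features compiled as extra dispatch paths and selected at runtime.
print(getattr(um, "__cpu_dispatch__", None))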
diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c new file mode 100644 index 0000000000000000000000000000000000000000..514a2b18f96cb089bb3c96f6420356c892adefdf --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c @@ -0,0 +1,12 @@ +/** + * Assembler may not fully support the following VSX3 scalar + * instructions, even though compilers report VSX3 support. + */ +int main(void) +{ + unsigned short bits = 0xFF; + double f; + __asm__ __volatile__("xscvhpdp %x0,%x1" : "=wa"(f) : "wa"(bits)); + __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits) : "wa" (f)); + return bits; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx4_mma.c new file mode 100644 index 0000000000000000000000000000000000000000..a70b2a9f6f95408eb7cfe59c056f114cc363869b --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx4_mma.c @@ -0,0 +1,21 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector float fv4sf_t; +typedef __vector unsigned char vec_t; + +int main(void) +{ + __vector_quad acc0; + float a[4] = {0,1,2,3}; + float b[4] = {0,1,2,3}; + vec_t *va = (vec_t *) a; + vec_t *vb = (vec_t *) b; + __builtin_mma_xvf32ger(&acc0, va[0], vb[0]); + fv4sf_t result[4]; + __builtin_mma_disassemble_acc((void *)result, &acc0); + fv4sf_t c0 = result[0]; + return (int)((float*)&c0)[0]; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx_asm.c new file mode 100644 index 0000000000000000000000000000000000000000..b73a6f43808eeb5af2bd212ee88b6c1002a29901 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx_asm.c @@ -0,0 +1,36 @@ +/** + * Testing ASM VSX register number fixer '%x' + * + * old versions of CLANG don't support %x in the inline asm template + * which fixes register number when using any of the register constraints wa, wd, wf. + * + * xref: + * - https://bugs.llvm.org/show_bug.cgi?id=31837 + * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html + */ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + float z4[] = {0, 0, 0, 0}; + signed int zout[] = {0, 0, 0, 0}; + + __vector float vz4 = vsx_ld(0, z4); + __vector signed int asm_ret = vsx_ld(0, zout); + + __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret)); + + vsx_st(asm_ret, 0, zout); + return zout[0]; +} diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/checks/test_flags.c b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/test_flags.c new file mode 100644 index 0000000000000000000000000000000000000000..4cd09d42a6503780087632aae9ea5b458671fa57 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/checks/test_flags.c @@ -0,0 +1 @@ +int test_flags; diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f47a6f27e2bdec46314ce7d9dafabe6e32589833 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-310.pyc new file mode 100644 index
0000000000000000000000000000000000000000..954fbdf6023d31ebcd9c3f05f78a31acff018172 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b547dfb6581315391a7fc36f1bbdeab46abfac Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e1c918dbaccf81cce721bcaeaf15d3a5aca776b Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2feb66529ee1c9f8203b21e5b41a3e9b7668688 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a06e07b7ca579033067ce5d3138064cd4ab8edda Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d817dcde4d6da2a652b009a617984b09e280d1c8 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..410299047e921a98433a76866e4656a771529abf Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eeb81eab1aa28aec5730579acab4a759dd666db Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-310.pyc 
b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f1282b11e5a9b25ae626358715ab7ce7174aae1 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9868cc558779759ce17ac825e67d5ae49d520be2 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f053bb65d2ce950fd53bfbcb47ecba1560aff912 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46ed4ff2ac1c0da2004088c76daeea63824fdce9 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91772f297658ce939ec04252d3be0e43c8fda324 Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-310.pyc b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38fdc9c7be0bfec7acf2df770b5f589e93a8149a Binary files /dev/null and b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-310.pyc differ diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/arm.py b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/arm.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb7e9af9c8ce3c5834f9e4d7283746961ea0981 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/fcompiler/arm.py @@ -0,0 +1,71 @@ +import sys + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['ArmFlangCompiler'] + +import functools + +class ArmFlangCompiler(FCompiler): + compiler_type = 'arm' + description = 'Arm Compiler' + version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['armflang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["armflang", "-fPIC"], + 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], + 'compiler_f90': ["armflang", "-fPIC"],
'linker_so': ["armflang", "-fPIC", "-shared"], + 'archiver': ["ar", "-cr"], + 'ranlib': None + } + + pic_flags = ["-fPIC", "-DPIC"] + c_compiler = 'arm' + module_dir_switch = '-module ' # Don't remove ending space! + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath=%s' % dir + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='armflang').get_version()) + diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..3714aea0e12e05ac8346e51d169664e9c62f4293 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_ccompiler_opt.py @@ -0,0 +1,808 @@ +import re, textwrap, os +from os import sys, path +from distutils.errors import DistutilsError + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + import unittest, contextlib, tempfile, shutil + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt + + # from numpy/testing/_private/utils.py + @contextlib.contextmanager + def tempdir(*args, **kwargs): + tmpdir = tempfile.mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + def assert_(expr, msg=''): + if not expr: + raise AssertionError(msg) +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + from numpy.testing import assert_, tempdir + +# architectures and compilers to test +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang", "fcc"), + s390x = ("gcc", "clang"), + noarch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = "" + def __init__(self, trap_files="", trap_flags="", *args, **kwargs): + self.fake_trap_files = trap_files + self.fake_trap_flags = trap_flags + CCompilerOpt.__init__(self, None, **kwargs) + + def __repr__(self): + return textwrap.dedent("""\ + <<<< + march : {} + compiler : {} + ---------------- + {} + >>>> + """).format(self.cc_march, self.cc_name, self.report()) + + def dist_compile(self, sources, flags, **kwargs): + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + if self.fake_trap_files: + for src in sources: + if re.match(self.fake_trap_files, src): + self.dist_error("source is trapped by a fake interface") + if self.fake_trap_flags: + for f in flags: + if re.match(self.fake_trap_flags, f): + self.dist_error("flag is trapped by a fake interface") + # fake objects + return zip(sources, [' '.join(flags)] * len(sources)) + + def dist_info(self): + return 
FakeCCompilerOpt.fake_info + + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _Test_CCompilerOpt: + arch = None # x86_64 + cc = None # gcc + + def setup_class(self): + FakeCCompilerOpt.conf_nocache = True + self._opt = None + + def nopt(self, *args, **kwargs): + FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") + return FakeCCompilerOpt(*args, **kwargs) + + def opt(self): + if not self._opt: + self._opt = self.nopt() + return self._opt + + def march(self): + return self.opt().cc_march + + def cc_name(self): + return self.opt().cc_name + + def get_targets(self, targets, groups, **kwargs): + FakeCCompilerOpt.conf_target_groups = groups + opt = self.nopt( + cpu_baseline=kwargs.get("baseline", "min"), + cpu_dispatch=kwargs.get("dispatch", "max"), + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + with tempdir() as tmpdir: + file = os.path.join(tmpdir, "test_targets.c") + with open(file, 'w') as f: + f.write(targets) + gtargets = [] + gflags = {} + fake_objects = opt.try_dispatch([file]) + for source, flags in fake_objects: + gtar = path.basename(source).split('.')[1:-1] + glen = len(gtar) + if glen == 0: + gtar = "baseline" + elif glen == 1: + gtar = gtar[0].upper() + else: + # converting a multi-target into the parentheses str format to be equivalent + # to the configuration statements syntax. + gtar = ('('+' '.join(gtar)+')').upper() + gtargets.append(gtar) + gflags[gtar] = flags + + has_baseline, targets = opt.sources_status[file] + targets = targets + ["baseline"] if has_baseline else targets + # convert tuples that represent multi-targets into the parentheses str format + targets = [ + '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar + for tar in targets + ] + if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): + raise AssertionError( + "'sources_status' returns different targets than the compiled targets\n" + "%s != %s" % (targets, gtargets) + ) + # return targets from 'sources_status' since the order matters + return targets, gflags + + def arg_regex(self, **kwargs): + map2origin = dict( + x64 = "x86", + ppc64le = "ppc64", + aarch64 = "armhf", + clang = "gcc", + ) + march = self.march(); cc_name = self.cc_name() + map_march = map2origin.get(march, march) + map_cc = map2origin.get(cc_name, cc_name) + for key in ( + march, cc_name, map_march, map_cc, + march + '_' + cc_name, + map_march + '_' + cc_name, + march + '_' + map_cc, + map_march + '_' + map_cc, + ) : + regex = kwargs.pop(key, None) + if regex is not None: + break + if regex: + if isinstance(regex, dict): + for k, v in regex.items(): + if v[-1:] not in ')}$?\\.+*': + regex[k] = v + '$' + else: + assert(isinstance(regex, str)) + if regex[-1:] not in ')}$?\\.+*': + regex += '$' + return regex + + def expect(self, dispatch, baseline="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_dispatch_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'dispatch features "%s" not match "%s"' % (features, match) + ) + + def expect_baseline(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline,
cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_baseline_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'baseline features "%s" do not match "%s"' % (features, match) + ) + + def expect_flags(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + flags = ' '.join(opt.cpu_baseline_flags()) + if not match: + if len(flags) != 0: + raise AssertionError( + 'expected empty flags, not "%s"' % flags + ) + return + if not re.match(match, flags): + raise AssertionError( + 'flags "%s" do not match "%s"' % (flags, match) + ) + + def expect_targets(self, targets, groups={}, **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) + targets = ' '.join(targets) + if not match: + if len(targets) != 0: + raise AssertionError( + 'expected empty targets, not "%s"' % targets + ) + return + if not re.match(match, targets, re.IGNORECASE): + raise AssertionError( + 'targets "%s" do not match "%s"' % (targets, match) + ) + + def expect_target_flags(self, targets, groups={}, **kwargs): + match_dict = self.arg_regex(**kwargs) + if match_dict is None: + return + assert(isinstance(match_dict, dict)) + _, tar_flags = self.get_targets(targets=targets, groups=groups) + + for match_tar, match_flags in match_dict.items(): + if match_tar not in tar_flags: + raise AssertionError( + 'expected to find target "%s"' % match_tar + ) + flags = tar_flags[match_tar] + if not match_flags: + if len(flags) != 0: + raise AssertionError( + 'expected to find empty flags in target "%s"' % match_tar + ) + if not re.match(match_flags, flags): + raise AssertionError( + '"%s" flags "%s" do not match "%s"' % (match_tar, flags, match_flags) + ) + + def test_interface(self): + wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" + wrong_cc = "clang" if self.cc != "clang" else "icc" + opt = self.opt() + assert_(getattr(opt, "cc_on_" + self.arch)) + assert_(not getattr(opt, "cc_on_" + wrong_arch)) + assert_(getattr(opt, "cc_is_" + self.cc)) + assert_(not getattr(opt, "cc_is_" + wrong_cc)) + + def test_args_empty(self): + for baseline, dispatch in ( + ("", "none"), + (None, ""), + ("none +none", "none - none"), + ("none -max", "min - max"), + ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), + ("max -vsx - avx + avx512f neon -MAX ", + "min -min + max -max -vsx + avx2 -avx2 +NONE") + ) : + opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + assert(len(opt.cpu_baseline_names()) == 0) + assert(len(opt.cpu_dispatch_names()) == 0) + + def test_args_validation(self): + if self.march() == "unknown": + return + # check the sanity of argument validation + for baseline, dispatch in ( + ("unknown_feature - max +min", "unknown max min"), # unknown feature names + ("#avx2", "$vsx") # groups and policies aren't acceptable + ) : + try: + self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + raise AssertionError("expected an exception for invalid arguments") + except DistutilsError: + pass
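+ + # The grammar exercised above, in short: features are space-separated names + # combined with '+' (add) and '-' (remove); 'min', 'max', 'native' and 'none' + # are special tokens, while '#group' and '$policy' tokens are rejected in + # these arguments and are only meaningful inside '/*@targets */' headers. + # A hedged usage sketch (the 'ccompiler' object and the feature values are + # placeholders, not fixtures of this file): + # + # opt = CCompilerOpt(ccompiler, cpu_baseline="min +avx2", + # cpu_dispatch="max -avx512_knl") + # opt.cpu_baseline_names() # resolved baseline feature names + # opt.cpu_dispatch_names() # resolved dispatched feature names + +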
vsx neon", + x86="sse", ppc64="vsx", armhf="neon", unknown="" + ) + self.expect( + "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", + x86 = "sse41 avx avx2", + ppc64 = "vsx2 vsx3", + armhf = "neon_vfpv4 asimd", + unknown = "" + ) + # any features in cpu_dispatch must be ignored if it's part of baseline + self.expect( + "sse neon vsx", baseline="sse neon vsx", + x86="", ppc64="", armhf="" + ) + self.expect( + "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", + x86="", ppc64="", armhf="" + ) + + def test_implies(self): + # baseline combining implied features, so we count + # on it instead of testing 'feature_implies()'' directly + self.expect_baseline( + "fma3 avx2 asimd vsx3", + # .* between two spaces can validate features in between + x86 = "sse .* sse41 .* fma3.*avx2", + ppc64 = "vsx vsx2 vsx3", + armhf = "neon neon_fp16 neon_vfpv4 asimd" + ) + """ + special cases + """ + # in icc and msvc, FMA3 and AVX2 can't be separated + # both need to implies each other, same for avx512f & cd + for f0, f1 in ( + ("fma3", "avx2"), + ("avx512f", "avx512cd"), + ): + diff = ".* sse42 .* %s .*%s$" % (f0, f1) + self.expect_baseline(f0, + x86_gcc=".* sse42 .* %s$" % f0, + x86_icc=diff, x86_iccw=diff + ) + self.expect_baseline(f1, + x86_gcc=".* avx .* %s$" % f1, + x86_icc=diff, x86_iccw=diff + ) + # in msvc, following features can't be separated too + for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): + for ff in f: + self.expect_baseline(ff, + x86_msvc=".*%s" % ' '.join(f) + ) + + # in ppc64le VSX and VSX2 can't be separated + self.expect_baseline("vsx", ppc64le="vsx vsx2") + # in aarch64 following features can't be separated + for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): + self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") + + def test_args_options(self): + # max & native + for o in ("max", "native"): + if o == "native" and self.cc_name() == "msvc": + continue + self.expect(o, + trap_files=".*cpu_(sse|vsx|neon|vx).c", + x86="", ppc64="", armhf="", s390x="" + ) + self.expect(o, + trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", + x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", + aarch64="", ppc64le="", s390x="vx" + ) + self.expect(o, + trap_files=".*cpu_(popcnt|vsx3).c", + x86="sse .* sse41", ppc64="vsx vsx2", + armhf="neon neon_fp16 .* asimd .*", + s390x="vx vxe vxe2" + ) + self.expect(o, + x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in icc, xop and fam4 aren't supported + x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in msvc, avx512_knl avx512_knm aren't supported + x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", + armhf=".* asimd asimdhp asimddp .*", + ppc64="vsx vsx2 vsx3 vsx4.*", + s390x="vx vxe vxe2.*" + ) + # min + self.expect("min", + x86="sse sse2", x64="sse sse2 sse3", + armhf="", aarch64="neon neon_fp16 .* asimd", + ppc64="", ppc64le="vsx vsx2", s390x="" + ) + self.expect( + "min", trap_files=".*cpu_(sse2|vsx2).c", + x86="", ppc64le="" + ) + # an exception must triggered if native flag isn't supported + # when option "native" is activated through the args + try: + self.expect("native", + trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", + x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", + ) + if self.march() != "unknown": + raise AssertionError( + "excepted an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("excepted no exceptions") + + def 
def test_flags(self): + self.expect_flags( + "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", + x86_gcc="-msse -msse2", x86_icc="-msse -msse2", + x86_iccw="/arch:SSE2", + x86_msvc="/arch:SSE2" if self.march() == "x86" else "", + ppc64_gcc="-mcpu=power8", + ppc64_clang="-mcpu=power8", + armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", + aarch64="", + s390x="-mzvector -march=arch12" + ) + # test -march normalization + self.expect_flags( + "asimd", + aarch64="", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" + ) + self.expect_flags( + "asimdhp", + aarch64_gcc=r"-march=armv8.2-a\+fp16", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" + ) + self.expect_flags( + "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" + ) + self.expect_flags( + # asimdfhm implies asimdhp + "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" + ) + self.expect_flags( + "asimddp asimdhp asimdfhm", + aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" + ) + self.expect_flags( + "vx vxe vxe2", + s390x=r"-mzvector -march=arch13" + ) + + def test_targets_exceptions(self): + for targets in ( + "bla bla", "/*@targets", + "/*@targets */", + "/*@targets unknown */", + "/*@targets $unknown_policy avx2 */", + "/*@targets #unknown_group avx2 */", + "/*@targets $ */", + "/*@targets # vsx */", + "/*@targets #$ vsx */", + "/*@targets vsx avx2 ) */", + "/*@targets vsx avx2 (avx2 */", + "/*@targets vsx avx2 () */", + "/*@targets vsx avx2 ($autovec) */", # no features + "/*@targets vsx avx2 (xxx) */", + "/*@targets vsx avx2 (baseline) */", + ) : + try: + self.expect_targets( + targets, + x86="", armhf="", ppc64="", s390x="" + ) + if self.march() != "unknown": + raise AssertionError( + "expected an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("expected no exceptions") + + def test_targets_syntax(self): + for targets in ( + "/*@targets $keep_baseline sse vsx neon vx*/", + "/*@targets,$keep_baseline,sse,vsx,neon vx*/", + "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", + """ + /* + ** @targets + ** $keep_baseline, sse vsx,neon, vx + */ + """, + """ + /* + ************@targets**************** + ** $keep_baseline, sse vsx, neon, vx + ************************************ + */ + """, + """ + /* + /////////////@targets///////////////// + //$keep_baseline//sse//vsx//neon//vx + ///////////////////////////////////// + */ + """, + """ + /* + @targets + $keep_baseline + SSE VSX NEON VX*/ + """ + ) : + self.expect_targets(targets, + x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" + ) + + def test_targets(self): + # test skipping baseline features + self.expect_targets( + """ + /*@targets + sse sse2 sse41 avx avx2 avx512f + vsx vsx2 vsx3 vsx4 + neon neon_fp16 asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="avx vsx2 asimd vx vxe", + x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", + s390x="vxe2" + ) + # test skipping non-dispatch features + self.expect_targets( + """ + /*@targets + sse41 avx avx2 avx512f + vsx2 vsx3 vsx4 + asimd asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", + x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" + ) + # test skipping features that aren't supported + self.expect_targets( + """ + /*@targets + sse2 sse41 avx2 avx512f + vsx2 vsx3 vsx4 + neon asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="", + trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", + x86="sse41 sse2", ppc64="vsx2",
armhf="asimdhp neon", + s390x="vxe vx" + ) + # test skipping features that implies each other + self.expect_targets( + """ + /*@targets + sse sse2 avx fma3 avx2 avx512f avx512cd + vsx vsx2 vsx3 + neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp + asimddp asimdfhm + */ + """, + baseline="", + x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", + x86_msvc="avx512cd avx2 avx sse2", + x86_icc="avx512cd avx2 avx sse2", + x86_iccw="avx512cd avx2 avx sse2", + ppc64="vsx3 vsx2 vsx", + ppc64le="vsx3 vsx2", + armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", + aarch64="asimdfhm asimddp asimdhp asimd" + ) + + def test_targets_policies(self): + # 'keep_baseline', generate objects for baseline features + self.expect_targets( + """ + /*@targets + $keep_baseline + sse2 sse42 avx2 avx512f + vsx2 vsx3 + neon neon_vfpv4 asimd asimddp + vx vxe vxe2 + */ + """, + baseline="sse41 avx2 vsx2 asimd vsx3 vxe", + x86="avx512f avx2 sse42 sse2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimddp asimd", + s390x="vxe2 vxe vx" + ) + # 'keep_sort', leave the sort as-is + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort + avx512f sse42 avx2 sse2 + vsx2 vsx3 + asimd neon neon_vfpv4 asimddp + vxe vxe2 + */ + """, + x86="avx512f sse42 avx2 sse2", + ppc64="vsx2 vsx3", + armhf="asimd neon neon_vfpv4 asimddp", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimd asimddp", + s390x="vxe vxe2" + ) + # 'autovec', skipping features that can't be + # vectorized by the compiler + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort $autovec + avx512f avx2 sse42 sse41 sse2 + vsx3 vsx2 + asimddp asimd neon_vfpv4 neon + */ + """, + x86_gcc="avx512f avx2 sse42 sse41 sse2", + x86_icc="avx512f avx2 sse42 sse41 sse2", + x86_iccw="avx512f avx2 sse42 sse41 sse2", + x86_msvc="avx512f avx2 sse2" + if self.march() == 'x86' else "avx512f avx2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimddp asimd" + ) + for policy in ("$maxopt", "$autovec"): + # 'maxopt' and autovec set the max acceptable optimization flags + self.expect_target_flags( + "/*@targets baseline %s */" % policy, + gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, + iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, + unknown={"baseline":".*"} + ) + + # 'werror', force compilers to treat warnings as errors + self.expect_target_flags( + "/*@targets baseline $werror */", + gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, + iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, + unknown={"baseline":".*"} + ) + + def test_targets_groups(self): + self.expect_targets( + """ + /*@targets $keep_baseline baseline #test_group */ + """, + groups=dict( + test_group=(""" + $keep_baseline + asimddp sse2 vsx2 avx2 vsx3 + avx512f asimdhp + """) + ), + x86="avx512f avx2 sse2 baseline", + ppc64="vsx3 vsx2 baseline", + armhf="asimddp asimdhp baseline" + ) + # test skip duplicating and sorting + self.expect_targets( + """ + /*@targets + * sse42 avx avx512f + * #test_group_1 + * vsx2 + * #test_group_2 + * asimddp asimdfhm + */ + """, + groups=dict( + test_group_1=(""" + VSX2 vsx3 asimd avx2 SSE41 + """), + test_group_2=(""" + vsx2 vsx3 asImd aVx2 sse41 + """) + ), + x86="avx512f avx2 avx sse42 sse41", + ppc64="vsx3 vsx2", + # vsx2 part of the default baseline of ppc64le, option ("min") + ppc64le="vsx3", + armhf="asimdfhm asimddp asimd", + # asimd part of the 
def test_targets_multi(self): + self.expect_targets( + """ + /*@targets + (avx512_clx avx512_cnl) (asimdhp asimddp) + */ + """, + x86=r"\(avx512_clx avx512_cnl\)", + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and auto-sort + self.expect_targets( + """ + /*@targets + f16c (sse41 avx sse42) (sse3 avx2 avx512f) + vsx2 (vsx vsx3 vsx2) + (neon neon_vfpv4 asimd asimdhp asimddp) + */ + """, + x86="avx512f f16c avx", + ppc64="vsx3 vsx2", + ppc64le="vsx3", # vsx2 is part of the baseline + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and keep sort + self.expect_targets( + """ + /*@targets $keep_sort + (sse41 avx sse42) (sse3 avx2 avx512f) + (vsx vsx3 vsx2) + (asimddp neon neon_vfpv4 asimd asimdhp) + (vx vxe vxe2) + */ + """, + x86="avx avx512f", + ppc64="vsx3", + armhf=r"\(asimdhp asimddp\)", + s390x="vxe2" + ) + # test compiler variety and avoid duplicates + self.expect_targets( + """ + /*@targets $keep_sort + fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 + */ + """, + x86_gcc=r"fma3 avx2 \(fma3 avx2\)", + x86_icc="avx2", x86_iccw="avx2", + x86_msvc="avx2" + ) + +def new_test(arch, cc): + if is_standalone: return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): + arch = '{arch}' + cc = '{cc}' + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.setup_class() + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) + return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): + arch = '{arch}' + cc = '{cc}' + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) +""" +if 1 and is_standalone: + FakeCCompilerOpt.fake_info = "x86_icc" + cco = FakeCCompilerOpt(None, cpu_baseline="avx2") + print(' '.join(cco.cpu_baseline_names())) + print(cco.cpu_baseline_flags()) + unittest.main() + sys.exit() +""" +for arch, compilers in arch_compilers.items(): + for cc in compilers: + exec(new_test(arch, cc)) + +if is_standalone: + unittest.main() diff --git a/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b287ebe2e83209fdcf5add161a7af8d988b9d086 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,84 @@ +import os + +from numpy.distutils.npy_pkg_config import read_config, parse_flags +from numpy.testing import temppath, assert_ + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo: + def test_simple(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name ==
simple_d['name']) + assert_(out.version == simple_d['version']) + + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + assert_(out.cflags() == '-I/Users/david/include') + +class TestParseFlags: + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + assert_(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) diff --git a/falcon/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc b/falcon/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..213920e71d71cfaa6a15f17140210a516849f6f5 --- /dev/null +++ b/falcon/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52af0e1a847faa1fed6fea271a23db181d94f16a856ff4d034eff404e623b6ef +size 109382