diff --git a/.gitattributes b/.gitattributes
index 72a1ea404e8ddb4f1ff5d27f0cd84ca000a88ce4..b117c79927f77e63ee15d7596796a1ae4ed53883 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1552,3 +1552,4 @@ infer_4_47_1/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symb
 infer_4_47_1/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa3_C.abi3.so filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/xformers/_C_flashattention.so filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_internvl/lib/python3.10/site-packages/xformers/_C_flashattention.so b/evalkit_internvl/lib/python3.10/site-packages/xformers/_C_flashattention.so
new file mode 100644
index 0000000000000000000000000000000000000000..4a209f105e9c2d1f04082a5d337b2263354f8405
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/xformers/_C_flashattention.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f865f8efa1afe4d8140546d0563c0b35afdb64f8cee4f8e26af12225a56edf5a
+size 276379872
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6d0034c5e65e10679a919e274834c4caac5e193
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..08c97dad3fff673386c480057c109d30cfd39f27
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb855f64fc0e254c13c73c4386adc540cdd89f54
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & _fill_mem_eff_dropout_mask_(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..795d7db6aa3956be377938b3b256b2185e5f700b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale=::std::nullopt, ::std::optional<int64_t> window_size_left=::std::nullopt, ::std::optional<int64_t> window_size_right=::std::nullopt, const ::std::optional<at::Tensor> & seqused_k={}, const ::std::optional<at::Tensor> & alibi_slopes={});
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_symint(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale=::std::nullopt, ::std::optional<c10::SymInt> window_size_left=::std::nullopt, ::std::optional<c10::SymInt> window_size_right=::std::nullopt, const ::std::optional<at::Tensor> & seqused_k={}, const ::std::optional<at::Tensor> & alibi_slopes={});
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_max_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_max_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..80af7d2c9e058e8011f3e188e24ec9b1ee3333c5
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_max_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_max(at::TensorList self);
+TORCH_API void _foreach_max_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_max_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adagrad.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adagrad.h
new file mode 100644
index 0000000000000000000000000000000000000000..31b38c1e049f0e77f699d0220acf9a35071d2f5b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adagrad.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_fused_adagrad_ops.h>
+
+namespace at {
+
+
+// aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adagrad_(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adagrad_::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adagrad_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adagrad_out::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adagrad_outf(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adagrad_out::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adagrad::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be50d903e91b3836c1f4ad388aa61559ae7d8acc
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
+
+} // namespace meta
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3908b8975d54eb4a55b13bfd8191cc363d9119d1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range=::std::nullopt, const ::std::optional<at::Tensor> & weight={}, bool density=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b782bfb44ad91a265bfcd01812f899654def6a2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _index_put_impl_ {
+  using schema = at::Tensor & (at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+};
+
+struct TORCH_API _index_put_impl_out {
+  using schema = at::Tensor & (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out);
+};
+
+struct TORCH_API _index_put_impl {
+  using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_index_put_impl")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce5288b724b4054e29584989aa96530e4eed3513
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _is_any_true(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bb1ff99fb31cb0e310624141dd7115c617fd305
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _masked_softmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim=::std::nullopt, ::std::optional<int64_t> mask_type=::std::nullopt);
+TORCH_API at::Tensor & _masked_softmax_outf(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_copy_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c373c9fcae8911feb76771facf29dd92cdd9837c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_get_values_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_get_values_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_get_values_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API _nested_get_values_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_get_values_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f7e0c73649e9b9701f99f4e85e698dcfa282ae4
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
+
+namespace at {
+
+
+// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
+inline at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
+    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
+}
+
+// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _new_zeros_with_same_feature_meta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
+    return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
+}
+// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _new_zeros_with_same_feature_meta_outf(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
+    return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7b4ea4a4729751337105ec1212aaf34632223acb
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _new_zeros_with_same_feature_meta {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_new_zeros_with_same_feature_meta")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims);
+};
+
+struct TORCH_API _new_zeros_with_same_feature_meta_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_new_zeros_with_same_feature_meta")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9d2e5fd5be542077158871f57ae9c937e4f7895
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f62b3196c5a5f0fd2bcb8155064a7ae665c86eea
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f77ee95d7e20e1a6042f725941939c18eaed74c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_warn_in_autograd {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_warn_in_autograd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_warn_in_autograd(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API _test_warn_in_autograd_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_warn_in_autograd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..60ff1770aa2855e9eff152b72c08c29b6d28b325
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4be7e9d494a6681667577f637fbbea6ee04c36d7
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bdc3b57dbdc573db11947d8c9292e26fd3e3dd7
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4b8d9dbc27925c7108c24d863118991492aa942
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/arcsinh_ops.h>
+
+namespace at {
+
+
+// aten::arcsinh(Tensor self) -> Tensor
+inline at::Tensor arcsinh(const at::Tensor & self) {
+    return at::_ops::arcsinh::call(self);
+}
+
+// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & arcsinh_(at::Tensor & self) {
+    return at::_ops::arcsinh_::call(self);
+}
+
+// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::arcsinh_out::call(self, out);
+}
+// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::arcsinh_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e0e59442c2a874fd0fbbe6da7b48125c418ed7b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/arctanh_ops.h>
+
+namespace at {
+
+
+// aten::arctanh(Tensor self) -> Tensor
+inline at::Tensor arctanh(const at::Tensor & self) {
+    return at::_ops::arctanh::call(self);
+}
+
+// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & arctanh_(at::Tensor & self) {
+    return at::_ops::arctanh_::call(self);
+}
+
+// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::arctanh_out::call(self, out);
+}
+// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::arctanh_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..57d100008fd5bc933ef108ed9dd980921036792a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+TORCH_API at::Tensor & bernoulli_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator=::std::nullopt);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, ::std::optional<at::Generator> generator=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e203467a5b22b477a5bb84b602082dd55597d28
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b07fe85ee61f56540fd7684b3012da7c91251bac
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & channel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t groups);
+TORCH_API at::Tensor & channel_shuffle_outf(const at::Tensor & self, int64_t groups, at::Tensor & out);
+TORCH_API at::Tensor & channel_shuffle_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt groups);
+TORCH_API at::Tensor & channel_shuffle_symint_outf(const at::Tensor & self, c10::SymInt groups, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..626df101f74f73cdf17dd1eddeaf074490bdf1f2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API coalesce {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::coalesce")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "coalesce(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..da612acb00c1ea07c85ea50a95777908add07caf
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..53a7e21142c83275eab25e2d4e9b851bef17bf71
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+TORCH_API at::Tensor conv3d_padding_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::string_view padding="valid", c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..83e1529706a96cb86da5350e5bf5b67116e533fe
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..869f138b1194711cbb7599d97d2900b5daaa7239
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API diagonal_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2);
+};
+
+struct TORCH_API diagonal_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, int64_t, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f9f64565a01a8cde80ce6b519d8e38dd190e60d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API at::Tensor fft_hfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API const at::Tensor & fft_hfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API const at::Tensor & fft_hfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, const at::Tensor & out);
+TORCH_API const at::Tensor & fft_hfft2_symint_out(const at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API const at::Tensor & fft_hfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, const at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h
new file mode 100644
index 0000000000000000000000000000000000000000..667003795eb9579d166e554e20257c961c086d52
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/fft_rfft2_ops.h>
+
+namespace at {
+
+
+// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+inline at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+inline at::Tensor fft_rfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2::call(self, s, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2::call(self, s, dim, norm);
+  }
+}
+
+// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
+  }
+}
+
+// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_rfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
+  }
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..90e05ba52039c823736375263b44bd6106bddf98
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
+TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
+TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out);
+TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..802b83632fb36510e1859ba333f67df84360298c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..33b5003030f46842945bf85ea5f9948d9b25ab73
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices);
+TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices);
+TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b007238b4049fd710b0fb0037fa6f17954227c8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor isneginf(const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5895ca6f884253d6d07434b43e8276e7baf979a1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor lgamma(const at::Tensor & self);
+TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & lgamma_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8078540fd9c25c3778b4e63ac557775b2a16fd56
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false);
+TORCH_API at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false);
+TORCH_API at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b56d85b93666fca706e614c4f58f609d728e8311
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true);
+TORCH_API at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true);
+TORCH_API at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fa322d4ae2d545d7ee7e8c3446476d5e1275197
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_vector_norm_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_vector_norm_out : public at::meta::structured_linalg_vector_norm {
+void impl(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a899a2f167d74ffb533cef846c0ec4e4ef1ba688
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_and_(at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_and_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6227e6152ef9b8743d08c60bb0492a07bf9cf80
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API max_pool3d_with_indices_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_pool3d_with_indices_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input);
+};
+
+struct TORCH_API max_pool3d_with_indices_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_pool3d_with_indices_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..229eca9154922bc64cc07676c35782f648ecc92e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & mean_outf(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h
new file mode 100644
index 0000000000000000000000000000000000000000..7267a9804be0e93475faa4d80c6ac6ab6cc7210f
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/meshgrid_ops.h>
+
+namespace at {
+
+
+// aten::meshgrid(Tensor[] tensors) -> Tensor[]
+inline ::std::vector<at::Tensor> meshgrid(at::TensorList tensors) {
+    return at::_ops::meshgrid::call(tensors);
+}
+
+// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
+inline ::std::vector<at::Tensor> meshgrid(at::TensorList tensors, c10::string_view indexing) {
+    return at::_ops::meshgrid_indexing::call(tensors, indexing);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bb184b9331050b4d6b48b76aada66c58cfcdb59
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/miopen_rnn_backward_ops.h>
+
+namespace at {
+
+
+// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+}
+
+// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void miopen_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+inline void miopen_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+    return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a55e796448f13815f56d76661d23d5dd429fd195
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ed1d1f8a48c2d5aaa5603c8261c8342793d45f3
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_adaptive_avg_pool2d {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_adaptive_avg_pool2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef output_size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+struct TORCH_API mkldnn_adaptive_avg_pool2d_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_adaptive_avg_pool2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..40d5623b8be1d825aa3778167c35900f2edb6406
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nanquantile_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API nanquantile {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<int64_t>, bool, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nanquantile")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation);
+};
+
+struct TORCH_API nanquantile_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<int64_t>, bool, c10::string_view, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nanquantile")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);
+};
+
+struct TORCH_API nanquantile_scalar {
+  using schema = at::Tensor (const at::Tensor &, double, ::std::optional<int64_t>, bool, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nanquantile")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation);
+};
+
+struct TORCH_API nanquantile_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, double, ::std::optional<int64_t>, bool, c10::string_view, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nanquantile")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b1695ccb53b563c45fceae43dcdb852de484569
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/native_layer_norm_backward_ops.h>
+
+namespace at {
+
+
+// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
+  }
+}
+
+// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
+  }
+}
+
+// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+  }
+}
+
+// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+  }
+}
+
+// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
  }
+}
+
+// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
+  }
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe0a7c30eed2144eb663b239d8f950012b65cca8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> math_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_cpu(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_cuda(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> nested_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6ccb1a656042c840fe0c216b1ab49dcd44ab650
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/nll_loss_forward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_nll_loss_forward_out_cpu : public at::meta::structured_nll_loss_forward {
+void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight);
+};
+struct TORCH_API structured_nll_loss_forward_out_cuda : public at::meta::structured_nll_loss_forward {
+void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9690c07494ae12e7afdad1cc7de61e9c09d1ec3b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor nonzero_static(const at::Tensor & self, int64_t size, int64_t fill_value=-1);
+TORCH_API at::Tensor & nonzero_static_out(at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value=-1);
+TORCH_API at::Tensor & nonzero_static_outf(const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal.h
new file mode 100644
index 0000000000000000000000000000000000000000..68cb64b663dda70e96e14ad0551eeaee0ae33e5b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/not_equal_ops.h>
+
+namespace at {
+
+
+// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::not_equal_Scalar_out::call(self, other, out);
+}
+// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & not_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::not_equal_Scalar_out::call(self, other, out);
+}
+
+// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor not_equal(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::not_equal_Scalar::call(self, other);
+}
+
+// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::not_equal_Tensor_out::call(self, other, out);
+}
+// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & not_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::not_equal_Tensor_out::call(self, other, out);
+}
+
+// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor not_equal(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::not_equal_Tensor::call(self, other);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..d91d32d6f9a6705f9c02c2d07e0367af5aa64f89
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/nuclear_norm_ops.h>
+
+namespace at {
+
+
+// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
+inline at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim=false) {
+    return at::_ops::nuclear_norm::call(self, keepdim);
+}
+
+// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, bool keepdim=false) {
+    return at::_ops::nuclear_norm_out::call(self, keepdim, out);
+}
+// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nuclear_norm_outf(const at::Tensor & self, bool keepdim, at::Tensor & out) {
+    return at::_ops::nuclear_norm_out::call(self, keepdim, out);
+}
+
+// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
+inline at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
+}
+
+// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::nuclear_norm_dim_out::call(self, dim, keepdim, out);
+}
+// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nuclear_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::nuclear_norm_dim_out::call(self, dim, keepdim, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c6ddab33c5a8d3e3f85a37d3144685d85b45f4b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API poisson {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::poisson")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "poisson(Tensor self, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, ::std::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API poisson_out {
+  using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::poisson")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..537c9ca0fa63f38c586fd5a0eee4b7e34b5fa347
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/promote_types_ops.h>
+
+namespace at {
+
+
+// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
+inline at::ScalarType promote_types(at::ScalarType type1, at::ScalarType type2) {
+    return at::_ops::promote_types::call(type1, type2);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/put_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/put_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1f78e0c55db94335f45f5941c230739e213dd20
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/put_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API put_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::put_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate);
+};
+
+struct TORCH_API put {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::put")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate);
+};
+
+struct TORCH_API put_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::put")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec489fbcaff32166901bc3c88a7ed71c19f82d8a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe63b83559d48d5140ef44a2604a4cc5c05a8613
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/Scalar.h>
+#include <c10/core/TensorOptions.h>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & quantized_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API at::Tensor & quantized_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names.h
new file mode 100644
index 0000000000000000000000000000000000000000..40448cc467889f980a348c714167644b24ec11e5
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/refine_names_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rms_norm_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rms_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5475ee384cfcc8e0204e856acdcb1f0aedbc274e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rms_norm_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rms_norm { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, const ::std::optional &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rms_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rms_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor") + static at::Tensor call(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, ::std::optional eps); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, ::std::optional eps); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c754fdf4d392204fd3b5da147ada2407b2d2846 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
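The rms_norm schema string above implies normalization by the root-mean-square over normalized_shape. A sketch under that reading (the manual reference computation is my assumption of the op's definition, shown only for illustration):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({2, 8});
  at::Tensor w = at::ones({8});
  at::Tensor y = at::rms_norm(x, /*normalized_shape=*/{8}, w, /*eps=*/1e-6);

  // Assumed manual reference: y = x / sqrt(mean(x^2) + eps) * weight
  at::Tensor rms = (x.pow(2).mean({-1}, /*keepdim=*/true) + 1e-6).sqrt();
  at::Tensor y_ref = x / rms * w;
  // y and y_ref should agree to floating-point tolerance.
  return 0;
}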
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor signbit(const at::Tensor & self); +TORCH_API at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..401bd9a106065c500110fed14356a65ee921e704 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sin : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slice.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slice.h new file mode 100644 index 0000000000000000000000000000000000000000..0b3c968b6256aec73ccd6ed68871dbc6cd2c356e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slice.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) +inline at::Tensor slice(const at::Tensor & self, int64_t dim=0, ::std::optional start=::std::nullopt, ::std::optional end=::std::nullopt, int64_t step=1) { + return at::_ops::slice_Tensor::call(self, dim, start.has_value() ? ::std::make_optional(c10::SymInt(*start)) : ::std::nullopt, end.has_value() ? ::std::make_optional(c10::SymInt(*end)) : ::std::nullopt, step); +} +namespace symint { + template ::value>> + at::Tensor slice(const at::Tensor & self, int64_t dim=0, ::std::optional start=::std::nullopt, ::std::optional end=::std::nullopt, int64_t step=1) { + return at::_ops::slice_Tensor::call(self, dim, start.has_value() ? ::std::make_optional(c10::SymInt(*start)) : ::std::nullopt, end.has_value() ? ::std::make_optional(c10::SymInt(*end)) : ::std::nullopt, step); + } +} + +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor(a) +inline at::Tensor slice_symint(const at::Tensor & self, int64_t dim=0, ::std::optional start=::std::nullopt, ::std::optional end=::std::nullopt, c10::SymInt step=1) { + return at::_ops::slice_Tensor::call(self, dim, start, end, step); +} +namespace symint { + template ::value>> + at::Tensor slice(const at::Tensor & self, int64_t dim=0, ::std::optional start=::std::nullopt, ::std::optional end=::std::nullopt, c10::SymInt step=1) { + return at::_ops::slice_Tensor::call(self, dim, start, end, step); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ec4f1f25c5a1e882432e74eb70e88ae700a33e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
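On the slice declarations above: the Tensor(a) annotation in the schema records that the result aliases self (a view, not a copy), and the _symint variants exist so symbolic sizes (c10::SymInt, e.g. under torch.compile) can flow through unchanged. A sketch of the plain overload, with illustrative values:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(12).reshape({3, 4});
  // Equivalent to x[:, 1:3] in Python; returns a view, not a copy.
  at::Tensor v = at::slice(x, /*dim=*/1, /*start=*/1, /*end=*/3, /*step=*/1);
  v.zero_();  // mutates x through the view, as the Tensor(a) alias annotation implies
  return 0;
}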
+inline at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); + } +} + +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out); + } +} + +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor +inline at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); + } +} + +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor +inline at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)) { + return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a65cb75ef65af431a53545941b4b692f33c9ac9e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_bessel_j1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j1(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_bessel_j1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j1.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f013747c084c3db2399f9e117554b5319bebca87 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_erfinv(const at::Tensor & self); +TORCH_API at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd0fdd80b73d55053bcf719d448a5671ce36b6d --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
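For the slow_conv3d family declared a few files above, a hedged sketch (shapes are illustrative). The name is literal: this is the unoptimized reference 3-D convolution, while at::conv3d normally dispatches to a faster backend kernel:

#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 2, 8, 8, 8});  // N, C_in, D, H, W
  at::Tensor weight = at::randn({4, 2, 3, 3, 3});  // C_out, C_in, kD, kH, kW
  at::Tensor bias   = at::randn({4});
  at::Tensor out = at::slow_conv3d(input, weight, /*kernel_size=*/{3, 3, 3},
                                   bias, /*stride=*/{1, 1, 1}, /*padding=*/{0, 0, 0});
  // out: {1, 4, 6, 6, 6}, since (8 - 3) / 1 + 1 = 6 along each spatial dim
  return 0;
}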
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_i1(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9c2b3c888900c60eef5e14396aa6cad9fd9a4c2b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_scaled_modified_bessel_k1 : public TensorIteratorBase { + + + void meta(const at::Tensor & x); +}; + +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6b96e1b399c5f2f2d37fb003cb0bf4a71570ed17 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor tanh(const at::Tensor & self); +TORCH_API at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & tanh_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..63c2733ef14f9a567bbe9f108e2f4cbb8c323bd5 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
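The special_* operators declared above mirror torch.special. A sketch covering the ones in this section, including the out-first variant generated alongside each (inputs are illustrative; erfinv expects values in (-1, 1)):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::linspace(0.1, 5.0, 50);
  at::Tensor j1 = at::special_bessel_j1(x);  // Bessel function of the first kind, order 1
  at::Tensor i1 = at::special_i1(x);         // modified Bessel function, order 1
  at::Tensor p  = at::rand({4});
  at::Tensor z  = at::special_erfinv(p);     // inverse error function

  at::Tensor out = at::empty_like(x);
  at::special_bessel_j1_out(out, x);         // out-first variant from the generated headers
  return 0;
}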
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API trapz_x { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapz") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, const at::Tensor & x, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim); +}; + +struct TORCH_API trapz_dx { + using schema = at::Tensor (const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapz") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dx") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, double dx, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx, int64_t dim); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6841e751ead8d06f66cd8d282bd1185d08524126 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
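The two trapz overloads recorded above (trapz.x with explicit sample points, trapz.dx with uniform spacing) both apply the trapezoidal rule, summing (y_i + y_{i+1})/2 times the interval width. A sketch with illustrative values:

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor y = at::tensor(std::vector<double>{0.0, 1.0, 2.0, 3.0});
  at::Tensor a = at::trapz(y, /*dx=*/1.0, /*dim=*/-1);  // (0+1)/2 + (1+2)/2 + (2+3)/2 = 4.5

  at::Tensor x = at::tensor(std::vector<double>{0.0, 1.0, 2.0, 4.0});  // non-uniform spacing
  at::Tensor b = at::trapz(y, x, /*dim=*/-1);           // 0.5 + 1.5 + 2.5*2 = 7.0
  return 0;
}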
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace cuda +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..8aa76268652b76bb8a1feea0c28d4a7eb8bd9f5b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) +inline ::std::tuple unique_dim(const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts); +} + +// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple unique_dim_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_out::call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2); +} +// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple unique_dim_outf(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_dim_out::call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9299cf0ca07da478570148fcb52e72371a40f512 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, ::std::optional> scale_factors); +struct TORCH_API structured_upsample_nearest1d_out_cpu : public at::meta::structured_upsample_nearest1d { +void impl(const at::Tensor & self, at::ArrayRef output_size, ::std::optional scales, const at::Tensor & out); +}; +struct TORCH_API structured_upsample_nearest1d_out_cuda : public at::meta::structured_upsample_nearest1d { +void impl(const at::Tensor & self, at::ArrayRef output_size, ::std::optional scales, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d7328784f27f5b2f732d0ad0dcfa8cd683e9945a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API view_as { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as(Tensor(a) self, Tensor other) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops
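Closing sketches for the last three headers, with illustrative values: triu_indices enumerates upper-triangle coordinates (its at::kLong default appears in the declaration above), unique_dim deduplicates whole slices along a dimension and can return inverse indices and counts, and view_as is shorthand for view(other.sizes()):

#include <ATen/ATen.h>
#include <vector>

int main() {
  // triu_indices: a 2 x N tensor of (row, col) coordinates of the upper triangle.
  at::Tensor idx = at::triu_indices(3, 3, /*offset=*/0);
  // idx[0] = {0,0,0,1,1,2}, idx[1] = {0,1,2,1,2,2}

  // unique_dim: deduplicate rows (dim=0); rows here are [1,1], [2,2], [1,1].
  at::Tensor m = at::tensor(std::vector<int64_t>{1, 1, 2, 2, 1, 1}).reshape({3, 2});
  auto [uniq, inverse, counts] =
      at::unique_dim(m, /*dim=*/0, /*sorted=*/true,
                     /*return_inverse=*/true, /*return_counts=*/true);
  // uniq: {{1,1},{2,2}}, inverse: {0,1,0}, counts: {2,1}

  // view_as: same as flat.view({3, 2}).
  at::Tensor flat = at::arange(6);
  at::Tensor shaped = flat.view_as(m);
  return 0;
}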