This view is limited to 50 files because it contains too many changes.  See the raw diff here.
Files changed (50)
  1. README.md +0 -3
  2. build.toml +7 -8
  3. build/{torch27-cxx11-cu118-x86_64-linux → torch25-cxx11-cu118-x86_64-linux}/activation/__init__.py +9 -14
  4. build/{torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so} +2 -2
  5. build/{torch27-cxx11-cu118-x86_64-linux → torch25-cxx11-cu118-x86_64-linux}/activation/_ops.py +3 -3
  6. build/{torch27-cxx11-cu126-x86_64-linux → torch25-cxx11-cu121-x86_64-linux}/activation/__init__.py +9 -14
  7. build/{torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so} +2 -2
  8. build/{torch27-cxx11-cu126-x86_64-linux → torch25-cxx11-cu121-x86_64-linux}/activation/_ops.py +3 -3
  9. build/{torch27-cxx11-cu128-x86_64-linux → torch25-cxx11-cu124-x86_64-linux}/activation/__init__.py +9 -14
  10. build/{torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so} +2 -2
  11. build/{torch27-cxx11-cu128-x86_64-linux → torch25-cxx11-cu124-x86_64-linux}/activation/_ops.py +3 -3
  12. build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py +47 -0
  13. build/{torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so} +2 -2
  14. build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +9 -0
  15. build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py +47 -0
  16. build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so +3 -0
  17. build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +9 -0
  18. build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py +47 -0
  19. build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so +3 -0
  20. build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +9 -0
  21. build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py +9 -14
  22. build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so +3 -0
  23. build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -3
  24. build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py +0 -79
  25. build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py +9 -14
  26. build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so +3 -0
  27. build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -3
  28. build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py +0 -79
  29. build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py +9 -14
  30. build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so +3 -0
  31. build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py +3 -3
  32. build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py +0 -79
  33. build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py +9 -14
  34. build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so +3 -0
  35. build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -3
  36. build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py +0 -79
  37. build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py +9 -14
  38. build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so +0 -3
  39. build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so +3 -0
  40. build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -3
  41. build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py +0 -79
  42. build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py +9 -14
  43. build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so +0 -3
  44. build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so +3 -0
  45. build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py +3 -3
  46. build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py +0 -79
  47. build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so +0 -3
  48. build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py +0 -79
  49. build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so +0 -3
  50. build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py +0 -79
README.md CHANGED
@@ -2,9 +2,6 @@
 tags:
 - kernel
 ---
-
-![Status](https://hubwebhook.dholtz.com/shield?repo=kernels-community/activation)
-
 ## Activation
 
 Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
build.toml CHANGED
@@ -1,18 +1,17 @@
 [general]
 name = "activation"
-universal = false
 
 [torch]
 src = [
-  "torch-ext/torch_binding.cpp",
-  "torch-ext/torch_binding.h",
+  "torch-ext/torch_binding.cpp",
+  "torch-ext/torch_binding.h"
 ]
 
 [kernel.activation]
-backend = "cuda"
-depends = ["torch"]
+cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ]
 src = [
-  "activation/activation_kernels.cu",
-  "activation/cuda_compat.h",
-  "activation/dispatch_utils.h",
+  "activation/activation_kernels.cu",
+  "activation/cuda_compat.h",
+  "activation/dispatch_utils.h",
 ]
+depends = [ "torch" ]
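The new `cuda-capabilities` key replaces the old `backend = "cuda"` line and pins the compute capabilities the kernel is built for, from Volta (7.0) up to Hopper (9.0). As a rough sketch (my own illustration, not part of this repo), a consumer could check whether its GPU falls inside that span using only stock PyTorch:

```python
import torch

# Illustrative only: the build.toml list is discrete ("7.0" ... "9.0"),
# so a simple range check is an approximation of the supported set.
major, minor = torch.cuda.get_device_capability()
supported = (7, 0) <= (major, minor) <= (9, 0)
print(f"sm_{major}{minor} covered by this build: {supported}")
```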
build/{torch27-cxx11-cu118-x86_64-linux → torch25-cxx11-cu118-x86_64-linux}/activation/__init__.py RENAMED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/{torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b925dc27b6a9afd5b6d11e454275222c531a92f7ca27958ac81a78c580665e4d
-size 2448088
+oid sha256:d50cdabfbed1df74e921ac34ff00bca0555977b14ef8082ddae7b1f30985a494
+size 2370160
build/{torch27-cxx11-cu118-x86_64-linux → torch25-cxx11-cu118-x86_64-linux}/activation/_ops.py RENAMED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_o63kkyjirmkf4
+ops = torch.ops._activation_o63kkyjirmkf4
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_o63kkyjirmkf4::{op_name}"
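Each `_ops.py` now binds against a content-hashed extension name (here `_activation_o63kkyjirmkf4`) instead of the git-describe name `_activation_e99cc09_dirty`, which keeps every build in its own `torch.ops` namespace. A small illustration of what the helper produces, assuming the package from this build directory is importable:

```python
# Assumes the `activation` package from this build is on the path.
from activation._ops import add_op_namespace_prefix

# The helper simply prepends the hashed extension namespace:
print(add_op_namespace_prefix("silu_and_mul"))
# -> "_activation_o63kkyjirmkf4::silu_and_mul"
```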
build/{torch27-cxx11-cu126-x86_64-linux → torch25-cxx11-cu121-x86_64-linux}/activation/__init__.py RENAMED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/{torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70e544ad6448a5576d26147f48403f3e9e593f4a2e24167dc8acb81ce3b7932e
-size 2518600
+oid sha256:2bd0709ef09c8f0c18d1dc4a36c8096c59459bece61f5f5dbea95d1e73f54d44
+size 2393264
build/{torch27-cxx11-cu126-x86_64-linux → torch25-cxx11-cu121-x86_64-linux}/activation/_ops.py RENAMED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_vrl36m2ejer54
+ops = torch.ops._activation_vrl36m2ejer54
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_vrl36m2ejer54::{op_name}"
build/{torch27-cxx11-cu128-x86_64-linux → torch25-cxx11-cu124-x86_64-linux}/activation/__init__.py RENAMED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/{torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfdbe510752b57a8dc4671f744bb0a2da5b1646e0b9a19fec02f1505ba044c8c
-size 2509960
+oid sha256:8353447f64e7d2df1a6a341d9c53bced53abef267f079923ae774170d0d57c53
+size 2427936
build/{torch27-cxx11-cu128-x86_64-linux → torch25-cxx11-cu124-x86_64-linux}/activation/_ops.py RENAMED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_va3moa75vw7c2
+ops = torch.ops._activation_va3moa75vw7c2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_va3moa75vw7c2::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,47 @@
+import torch
+
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
+    return out
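The new `__init__.py` exposes the kernels as plain functions that write into a caller-provided `out` tensor; for the `*_and_mul` variants, `out` has half the last dimension of `x`, matching the shape logic in the `layers.py` modules deleted below. A hedged usage sketch, assuming the package imports as `activation` and a CUDA device is present:

```python
import torch
import activation  # assumed import name for this built package

x = torch.randn(8, 2048, device="cuda", dtype=torch.float16)
# silu_and_mul consumes a concatenated [a, b] input and writes act(a) * b,
# so the caller allocates an output with half the last dimension.
out = torch.empty(8, 1024, device="cuda", dtype=torch.float16)
activation.silu_and_mul(out, x)
```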
build/{torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so → torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:60fd224c33657558f03be5be57cc8d35ade23225b1abd71557b170c8a7010cd1
-size 2440576
+oid sha256:df184a6315118d787a1bd6b435cb45f1ca7828445a1f1c0e55c57645cfbba43a
+size 2362600
build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_qr3gs3eckeig4
+ops = torch.ops._activation_qr3gs3eckeig4
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_qr3gs3eckeig4::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,47 @@
+import torch
+
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
+    return out
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccb13cfc2e45cf483e8b9f77f1760f28b48bcf185508d51b32d45bc759c4e8bb
+size 2385440
build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_p7gbzt25w3zg2
+ops = torch.ops._activation_p7gbzt25w3zg2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_p7gbzt25w3zg2::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,47 @@
+import torch
+
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
+    return out
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f8048853e8cb06e8574a9a9497800d2be438f7989d79f44dcf2e0ced38a75a9
+size 2420192
build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_jg7yaigtn7wco
+ops = torch.ops._activation_jg7yaigtn7wco
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_jg7yaigtn7wco::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde5439e78ba0e1aaa1937d798b214b46d38cbab8e4384b93a22239fed1a4dd4
+size 2370264
build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_ncisyrun7guwk
+ops = torch.ops._activation_ncisyrun7guwk
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_ncisyrun7guwk::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6bd20d411c51fc8729b15cab6a60c5c9185222474aa035489e1bff299d76682
+size 2428040
build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_ochhfvlnc3vyc
+ops = torch.ops._activation_ochhfvlnc3vyc
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_ochhfvlnc3vyc::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41c18b20c2bf8c49d2d3088a9bc1aad4293df0b57eafc9b141a9e8e595fe551a
+size 2436672
build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_u6vnqubnicksq
+ops = torch.ops._activation_u6vnqubnicksq
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_u6vnqubnicksq::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfbcd5da358cd5cb7982d19c8880cf4db6f08b46622a7a953f755ad59e4e1492
+size 2362752
build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_2vn6ty3gfqfb6
+ops = torch.ops._activation_2vn6ty3gfqfb6
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_2vn6ty3gfqfb6::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e364773259dc1b91f3c0d3b076da83c5a9c6ee18ffdace30315c602dffd1dabe
-size 2502264
build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1bc928823117c800904bcd3492bf1a0c65a32f6d8a842dc039f55e29831ab49
+size 2420344
build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_myvteedxdpqc6
+ops = torch.ops._activation_myvteedxdpqc6
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_myvteedxdpqc6::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py CHANGED
@@ -1,8 +1,15 @@
 import torch
 
-from ._ops import ops
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
 
-from . import layers
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
 
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
@@ -38,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
     ops.gelu_quick(out, x)
     return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7ac88cc0d3c65ab283d20608f3a097be29ee572e7856f10f8d7919536efd95b4
-size 2506808
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:474727e434a9cd4ec984a6da7124992ead4ca0fefce9581d0fd503e36c065aed
+size 2424888
build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_e99cc09_dirty
-ops = torch.ops._activation_e99cc09_dirty
+from . import _activation_rbswus6emrhm2
+ops = torch.ops._activation_rbswus6emrhm2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_e99cc09_dirty::{op_name}"
+    return f"_activation_rbswus6emrhm2::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e4f9e647eea40d3d3801d5ee57d4917e4c2e8dbfd87cdfebdc40b1b0a1c571fe
-size 2448184
build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a2b72ff2a0f2253e4dfe028842b5f15cabf2647d7812bf4662a2de510ca0c489
-size 2518632
build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py DELETED
@@ -1,79 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    can_torch_compile: bool = True
-
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    can_torch_compile: bool = True
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
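With the `layers.py` modules removed from every build variant above, callers that instantiated `layers.SiluAndMul()` and friends have to allocate the output and call the functional API directly. A hypothetical migration sketch, reconstructed from the deleted `forward` bodies:

```python
import torch
from activation import silu_and_mul  # functional API kept by this change

x = torch.randn(4, 4096, device="cuda", dtype=torch.float16)
# Reproduces the deleted SiluAndMul.forward: halve the last dimension,
# allocate the output, then invoke the kernel where the module was called.
d = x.shape[-1] // 2
out = torch.empty(x.shape[:-1] + (d,), dtype=x.dtype, device=x.device)
silu_and_mul(out, x)
```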