Kernels
danieldk (HF Staff) committed
Commit 908968f · verified
1 Parent(s): e61c028

Build uploaded using `kernels`.
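Builds published this way are normally consumed through the `kernels` package, which selects the variant matching the local torch/CUDA/architecture combination (e.g. `torch29-cxx11-cu128-aarch64-linux`). A minimal sketch; the repository id below is an assumption for illustration:

```python
# Minimal sketch: fetching a pre-built kernel from the Hub with the
# `kernels` package. The repo id is hypothetical, not part of this commit.
from kernels import get_kernel

# get_kernel() downloads and imports the build variant that matches the
# local torch version, CUDA version, and CPU architecture.
quantization_eetq = get_kernel("kernels-community/quantization-eetq")
```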

Files changed (24)
  1. build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  2. build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  3. build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  4. build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py +3 -3
  5. build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/{_quantization_eetq_6337ee0.abi3.so → _quantization_eetq_ee9ac42.abi3.so} +2 -2
  6. build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  7. build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  8. build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  9. build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/_ops.py +3 -3
  10. build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/{_quantization_eetq_1d4b892.abi3.so → _quantization_eetq_ee9ac42.abi3.so} +2 -2
  11. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py +3 -0
  12. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  13. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  14. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  15. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py +9 -0
  16. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_ee9ac42.abi3.so +3 -0
  17. build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
  18. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py +3 -0
  19. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  20. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  21. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  22. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py +9 -0
  23. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/_quantization_eetq_ee9ac42.abi3.so +3 -0
  24. build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (335 Bytes).
 
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc ADDED
Binary file (548 Bytes).
 
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc ADDED
Binary file (1.84 kB).
 
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_6337ee0
-ops = torch.ops._quantization_eetq_6337ee0
+from . import _quantization_eetq_ee9ac42
+ops = torch.ops._quantization_eetq_ee9ac42
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_6337ee0::{op_name}"
+    return f"_quantization_eetq_ee9ac42::{op_name}"
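Each variant's `_ops.py` binds the hash-suffixed native extension to a stable `ops` handle, so callers never reference the build hash directly; `add_op_namespace_prefix` builds the fully qualified op name that `torch.library` APIs expect. An illustrative sketch, assuming the built package is importable as `quantization_eetq` and that no meta implementation is already registered:

```python
import torch
from quantization_eetq._ops import ops, add_op_namespace_prefix

# The stable handle hides the per-build hash suffix:
gemm = ops.w8_a16_gemm

# The prefix helper yields the qualified name
# ("_quantization_eetq_ee9ac42::w8_a16_gemm"), e.g. for registering a
# meta ("fake") implementation used by torch.compile tracing.
# The output shape below is an assumption for illustration.
@torch.library.register_fake(add_op_namespace_prefix("w8_a16_gemm"))
def _(input, weight, scale):
    return input.new_empty((*input.shape[:-1], weight.shape[-1]))
```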
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/{_quantization_eetq_6337ee0.abi3.so → _quantization_eetq_ee9ac42.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6022247112ebdaf553f6f92b459391e823fb10d28a224235e37be45dea3ed6a9
-size 37504664
+oid sha256:2c32dd6d144418fd7255d5555b634407890ad96b298c98a00bb8463d6f6dd9be
+size 38028936
build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_1d4b892
-ops = torch.ops._quantization_eetq_1d4b892
+from . import _quantization_eetq_ee9ac42
+ops = torch.ops._quantization_eetq_ee9ac42
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_1d4b892::{op_name}"
+    return f"_quantization_eetq_ee9ac42::{op_name}"
build/torch28-cxx11-cu129-aarch64-linux/quantization_eetq/{_quantization_eetq_1d4b892.abi3.so → _quantization_eetq_ee9ac42.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f9968edb45388c79bc28af40a3618d415c9149facdab15748d857f96b3be7ab
-size 38153728
+oid sha256:c3c53ac387f693a12937e3c6d8640832470cd801117269fcbc2f6a1a83382612
+size 38743552
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (335 Bytes).
 
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc ADDED
Binary file (548 Bytes).
 
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc ADDED
Binary file (1.84 kB).
 
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_ee9ac42
+ops = torch.ops._quantization_eetq_ee9ac42
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_ee9ac42::{op_name}"
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_ee9ac42.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc556dbe02d86c5c0c8f932c88e38fb1252b9557e7c17917af5181e2678b9da
+size 31535480
build/torch29-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
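These wrappers are the public surface re-exported by `__init__.py`. A minimal end-to-end sketch of the intended flow, following the usual EETQ weight-only pattern and importing the package as if it were on the path; the shapes, layout, and device placement here are assumptions, not guarantees from this diff:

```python
import torch
from quantization_eetq import quant_weights, w8_a16_gemm

# Hypothetical fp16 weight of a linear layer (out_features x in_features).
weight = torch.randn(4096, 4096, dtype=torch.float16)

# Quantize to int8 on CPU in (in_features, out_features) layout, as in the
# usual EETQ flow; returns the packed int8 weight and per-channel scales.
int8_weight, scales = quant_weights(weight.t().contiguous(), torch.int8, False)

# Weight-only W8A16 GEMM: fp16 activations against the int8 weight.
x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
y = w8_a16_gemm(x, int8_weight.cuda(), scales.cuda())
```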
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (335 Bytes).
 
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc ADDED
Binary file (548 Bytes).
 
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc ADDED
Binary file (1.84 kB).
 
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_ee9ac42
+ops = torch.ops._quantization_eetq_ee9ac42
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_ee9ac42::{op_name}"
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/_quantization_eetq_ee9ac42.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1bc25a027bfbe2c5b130fa3682d9130157ba0d4eae45f557e9243c4d16c944e
+size 37956592
build/torch29-cxx11-cu128-aarch64-linux/quantization_eetq/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )