Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- infer_4_33_0/lib/python3.10/site-packages/certifi/__init__.py +4 -0
- infer_4_33_0/lib/python3.10/site-packages/certifi/__main__.py +12 -0
- infer_4_33_0/lib/python3.10/site-packages/certifi/cacert.pem +0 -0
- infer_4_33_0/lib/python3.10/site-packages/certifi/core.py +114 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/INSTALLER +1 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/METADATA +32 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/RECORD +21 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/REQUESTED +0 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/WHEEL +5 -0
- infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/top_level.txt +1 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/core.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/distribution_lib.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/export.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/image.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/layer.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/linalg.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/math.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/nn.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/numpy.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/optimizer.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/random.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/rnn.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/sparse.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/tensorboard.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/trainer.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/core.py +407 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/distribution_lib.py +265 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/linalg.py +89 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/math.py +298 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/nn.py +1197 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/numpy.py +1277 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/optimizer.py +112 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/rnn.py +226 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/tensorboard.py +23 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/__pycache__/nn.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/random.py +103 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/trainer.py +272 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/__init__.py +29 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/core.py +670 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/distribution_lib.py +87 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/export.py +32 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/image.py +493 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/layer.py +114 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/linalg.py +234 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/math.py +381 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/nn.py +1068 -0
- infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/numpy.py +2658 -0
.gitattributes
CHANGED
|
@@ -1084,3 +1084,6 @@ infer_4_37_2/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
|
|
| 1084 |
infer_4_33_0/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1085 |
infer_4_33_0/lib/python3.10/site-packages/keras/src/ops/__pycache__/numpy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1086 |
infer_4_33_0/lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1084 |
infer_4_33_0/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1085 |
infer_4_33_0/lib/python3.10/site-packages/keras/src/ops/__pycache__/numpy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1086 |
infer_4_33_0/lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1087 |
+
infer_4_37_2/lib/libubsan.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1088 |
+
infer_4_37_2/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
|
| 1089 |
+
infer_4_33_0/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
infer_4_33_0/lib/python3.10/site-packages/certifi/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .core import contents, where
|
| 2 |
+
|
| 3 |
+
__all__ = ["contents", "where"]
|
| 4 |
+
__version__ = "2024.12.14"
|
infer_4_33_0/lib/python3.10/site-packages/certifi/__main__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
from certifi import contents, where
|
| 4 |
+
|
| 5 |
+
parser = argparse.ArgumentParser()
|
| 6 |
+
parser.add_argument("-c", "--contents", action="store_true")
|
| 7 |
+
args = parser.parse_args()
|
| 8 |
+
|
| 9 |
+
if args.contents:
|
| 10 |
+
print(contents())
|
| 11 |
+
else:
|
| 12 |
+
print(where())
|
infer_4_33_0/lib/python3.10/site-packages/certifi/cacert.pem
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
infer_4_33_0/lib/python3.10/site-packages/certifi/core.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
certifi.py
|
| 3 |
+
~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module returns the installation location of cacert.pem or its contents.
|
| 6 |
+
"""
|
| 7 |
+
import sys
|
| 8 |
+
import atexit
|
| 9 |
+
|
| 10 |
+
def exit_cacert_ctx() -> None:
|
| 11 |
+
_CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
if sys.version_info >= (3, 11):
|
| 15 |
+
|
| 16 |
+
from importlib.resources import as_file, files
|
| 17 |
+
|
| 18 |
+
_CACERT_CTX = None
|
| 19 |
+
_CACERT_PATH = None
|
| 20 |
+
|
| 21 |
+
def where() -> str:
|
| 22 |
+
# This is slightly terrible, but we want to delay extracting the file
|
| 23 |
+
# in cases where we're inside of a zipimport situation until someone
|
| 24 |
+
# actually calls where(), but we don't want to re-extract the file
|
| 25 |
+
# on every call of where(), so we'll do it once then store it in a
|
| 26 |
+
# global variable.
|
| 27 |
+
global _CACERT_CTX
|
| 28 |
+
global _CACERT_PATH
|
| 29 |
+
if _CACERT_PATH is None:
|
| 30 |
+
# This is slightly janky, the importlib.resources API wants you to
|
| 31 |
+
# manage the cleanup of this file, so it doesn't actually return a
|
| 32 |
+
# path, it returns a context manager that will give you the path
|
| 33 |
+
# when you enter it and will do any cleanup when you leave it. In
|
| 34 |
+
# the common case of not needing a temporary file, it will just
|
| 35 |
+
# return the file system location and the __exit__() is a no-op.
|
| 36 |
+
#
|
| 37 |
+
# We also have to hold onto the actual context manager, because
|
| 38 |
+
# it will do the cleanup whenever it gets garbage collected, so
|
| 39 |
+
# we will also store that at the global level as well.
|
| 40 |
+
_CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
|
| 41 |
+
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
| 42 |
+
atexit.register(exit_cacert_ctx)
|
| 43 |
+
|
| 44 |
+
return _CACERT_PATH
|
| 45 |
+
|
| 46 |
+
def contents() -> str:
|
| 47 |
+
return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
|
| 48 |
+
|
| 49 |
+
elif sys.version_info >= (3, 7):
|
| 50 |
+
|
| 51 |
+
from importlib.resources import path as get_path, read_text
|
| 52 |
+
|
| 53 |
+
_CACERT_CTX = None
|
| 54 |
+
_CACERT_PATH = None
|
| 55 |
+
|
| 56 |
+
def where() -> str:
|
| 57 |
+
# This is slightly terrible, but we want to delay extracting the
|
| 58 |
+
# file in cases where we're inside of a zipimport situation until
|
| 59 |
+
# someone actually calls where(), but we don't want to re-extract
|
| 60 |
+
# the file on every call of where(), so we'll do it once then store
|
| 61 |
+
# it in a global variable.
|
| 62 |
+
global _CACERT_CTX
|
| 63 |
+
global _CACERT_PATH
|
| 64 |
+
if _CACERT_PATH is None:
|
| 65 |
+
# This is slightly janky, the importlib.resources API wants you
|
| 66 |
+
# to manage the cleanup of this file, so it doesn't actually
|
| 67 |
+
# return a path, it returns a context manager that will give
|
| 68 |
+
# you the path when you enter it and will do any cleanup when
|
| 69 |
+
# you leave it. In the common case of not needing a temporary
|
| 70 |
+
# file, it will just return the file system location and the
|
| 71 |
+
# __exit__() is a no-op.
|
| 72 |
+
#
|
| 73 |
+
# We also have to hold onto the actual context manager, because
|
| 74 |
+
# it will do the cleanup whenever it gets garbage collected, so
|
| 75 |
+
# we will also store that at the global level as well.
|
| 76 |
+
_CACERT_CTX = get_path("certifi", "cacert.pem")
|
| 77 |
+
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
| 78 |
+
atexit.register(exit_cacert_ctx)
|
| 79 |
+
|
| 80 |
+
return _CACERT_PATH
|
| 81 |
+
|
| 82 |
+
def contents() -> str:
|
| 83 |
+
return read_text("certifi", "cacert.pem", encoding="ascii")
|
| 84 |
+
|
| 85 |
+
else:
|
| 86 |
+
import os
|
| 87 |
+
import types
|
| 88 |
+
from typing import Union
|
| 89 |
+
|
| 90 |
+
Package = Union[types.ModuleType, str]
|
| 91 |
+
Resource = Union[str, "os.PathLike"]
|
| 92 |
+
|
| 93 |
+
# This fallback will work for Python versions prior to 3.7 that lack the
|
| 94 |
+
# importlib.resources module but relies on the existing `where` function
|
| 95 |
+
# so won't address issues with environments like PyOxidizer that don't set
|
| 96 |
+
# __file__ on modules.
|
| 97 |
+
def read_text(
|
| 98 |
+
package: Package,
|
| 99 |
+
resource: Resource,
|
| 100 |
+
encoding: str = 'utf-8',
|
| 101 |
+
errors: str = 'strict'
|
| 102 |
+
) -> str:
|
| 103 |
+
with open(where(), encoding=encoding) as data:
|
| 104 |
+
return data.read()
|
| 105 |
+
|
| 106 |
+
# If we don't have importlib.resources, then we will just do the old logic
|
| 107 |
+
# of assuming we're on the filesystem and munge the path directly.
|
| 108 |
+
def where() -> str:
|
| 109 |
+
f = os.path.dirname(__file__)
|
| 110 |
+
|
| 111 |
+
return os.path.join(f, "cacert.pem")
|
| 112 |
+
|
| 113 |
+
def contents() -> str:
|
| 114 |
+
return read_text("certifi", "cacert.pem", encoding="ascii")
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: gast
|
| 3 |
+
Version: 0.6.0
|
| 4 |
+
Summary: Python AST that abstracts the underlying Python version
|
| 5 |
+
Home-page: https://github.com/serge-sans-paille/gast/
|
| 6 |
+
Author: serge-sans-paille
|
| 7 |
+
Author-email: [email protected]
|
| 8 |
+
License: BSD 3-Clause
|
| 9 |
+
Classifier: Development Status :: 4 - Beta
|
| 10 |
+
Classifier: Environment :: Console
|
| 11 |
+
Classifier: Intended Audience :: Developers
|
| 12 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 13 |
+
Classifier: Natural Language :: English
|
| 14 |
+
Classifier: Programming Language :: Python :: 2
|
| 15 |
+
Classifier: Programming Language :: Python :: 2.7
|
| 16 |
+
Classifier: Programming Language :: Python :: 3
|
| 17 |
+
Classifier: Programming Language :: Python :: 3.4
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.5
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 25 |
+
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
|
| 26 |
+
License-File: LICENSE
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
A generic AST to represent Python2 and Python3's Abstract Syntax Tree(AST).
|
| 30 |
+
|
| 31 |
+
GAST provides a compatibility layer between the AST of various Python versions,
|
| 32 |
+
as produced by ``ast.parse`` from the standard ``ast`` module.
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gast-0.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
gast-0.6.0.dist-info/LICENSE,sha256=agS7q9m0i-pr98C9PzoGLhR2s8QDp0ZEj9abDZAuFI8,1490
|
| 3 |
+
gast-0.6.0.dist-info/METADATA,sha256=ZaEdD89-rroMy_gAkTanC3FMB0W3BU6LMKo-ZJXDg60,1327
|
| 4 |
+
gast-0.6.0.dist-info/RECORD,,
|
| 5 |
+
gast-0.6.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
gast-0.6.0.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
|
| 7 |
+
gast-0.6.0.dist-info/top_level.txt,sha256=OZY5DvDNAf17-SMBR1pXPV7iJyIpR-6sFAgOfFc_Vz8,5
|
| 8 |
+
gast/__init__.py,sha256=qebTP3aM9LOaOeilaPxUlcP_PkcatAuS7_Q6uaLpsHQ,111
|
| 9 |
+
gast/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
gast/__pycache__/ast2.cpython-310.pyc,,
|
| 11 |
+
gast/__pycache__/ast3.cpython-310.pyc,,
|
| 12 |
+
gast/__pycache__/astn.cpython-310.pyc,,
|
| 13 |
+
gast/__pycache__/gast.cpython-310.pyc,,
|
| 14 |
+
gast/__pycache__/unparser.cpython-310.pyc,,
|
| 15 |
+
gast/__pycache__/version.cpython-310.pyc,,
|
| 16 |
+
gast/ast2.py,sha256=RP9OQ3D_cWmSyf36oWCQkbY2vfGhMVMbsQHXmbvu9nM,13446
|
| 17 |
+
gast/ast3.py,sha256=FHd_8TJL_NBQTYDETWkKJiobJM83O2ZjZZIZLnTF6Ss,17275
|
| 18 |
+
gast/astn.py,sha256=ucrgL8OMGAfWVpPwVUOVTA2ckSlTE83D67UU0rp94Q4,1057
|
| 19 |
+
gast/gast.py,sha256=aCT_jFyXCiT_e-YcwqDqSJH0O22qu6zpYTnpxmSVJZA,22465
|
| 20 |
+
gast/unparser.py,sha256=KwwXCWvZ2nu-XJc11SAPJHj2EQzMQC4Ow5pbH_KULV8,39633
|
| 21 |
+
gast/version.py,sha256=CBY3jsC-9HCm7eZ6CKD-sYLCejqOJ1pYWPQM4LGIXcI,22
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/REQUESTED
ADDED
|
File without changes
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (70.2.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
infer_4_33_0/lib/python3.10/site-packages/gast-0.6.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
gast
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/distribution_lib.cpython-310.pyc
ADDED
|
Binary file (7.71 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/export.cpython-310.pyc
ADDED
|
Binary file (5.14 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/image.cpython-310.pyc
ADDED
|
Binary file (9.33 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/layer.cpython-310.pyc
ADDED
|
Binary file (328 Bytes). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/linalg.cpython-310.pyc
ADDED
|
Binary file (2.78 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/math.cpython-310.pyc
ADDED
|
Binary file (7.44 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/nn.cpython-310.pyc
ADDED
|
Binary file (24.8 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/numpy.cpython-310.pyc
ADDED
|
Binary file (29.6 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/optimizer.cpython-310.pyc
ADDED
|
Binary file (4.09 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/random.cpython-310.pyc
ADDED
|
Binary file (3.2 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/rnn.cpython-310.pyc
ADDED
|
Binary file (5.16 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/sparse.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/tensorboard.cpython-310.pyc
ADDED
|
Binary file (893 Bytes). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/__pycache__/trainer.cpython-310.pyc
ADDED
|
Binary file (22 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/core.py
ADDED
|
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import jax
|
| 2 |
+
import jax.experimental.sparse as jax_sparse
|
| 3 |
+
import jax.numpy as jnp
|
| 4 |
+
import ml_dtypes
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from keras.src import tree
|
| 8 |
+
from keras.src.backend.common import KerasVariable
|
| 9 |
+
from keras.src.backend.common import global_state
|
| 10 |
+
from keras.src.backend.common import standardize_dtype
|
| 11 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 12 |
+
from keras.src.backend.common.name_scope import name_scope as base_name_scope
|
| 13 |
+
from keras.src.backend.common.stateless_scope import StatelessScope
|
| 14 |
+
from keras.src.backend.common.symbolic_scope import SymbolicScope
|
| 15 |
+
from keras.src.backend.jax import distribution_lib
|
| 16 |
+
|
| 17 |
+
SUPPORTS_SPARSE_TENSORS = True
|
| 18 |
+
IS_THREAD_SAFE = True
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Variable(KerasVariable):
|
| 22 |
+
def _initialize(self, value):
|
| 23 |
+
# Note that variable.shape is needed by distribution_lib
|
| 24 |
+
self._shape = self._validate_shape(value.shape)
|
| 25 |
+
# We can't import the keras/distribution/distribution_lib
|
| 26 |
+
# due to circular dependency.
|
| 27 |
+
distribution = global_state.get_global_attribute("distribution")
|
| 28 |
+
if distribution is not None:
|
| 29 |
+
self._layout = distribution_lib._to_jax_layout(
|
| 30 |
+
distribution.get_variable_layout(self)
|
| 31 |
+
)
|
| 32 |
+
else:
|
| 33 |
+
self._layout = None
|
| 34 |
+
self._direct_assign(value)
|
| 35 |
+
|
| 36 |
+
def _direct_assign(self, value):
|
| 37 |
+
if getattr(self, "_layout", None) is not None:
|
| 38 |
+
value = distribution_lib.distribute_variable(value, self._layout)
|
| 39 |
+
self._value = value
|
| 40 |
+
|
| 41 |
+
def _convert_to_tensor(self, value, dtype=None):
|
| 42 |
+
return convert_to_tensor(value, dtype=dtype, sparse=False)
|
| 43 |
+
|
| 44 |
+
# Overload native accessor.
|
| 45 |
+
def __jax_array__(self):
|
| 46 |
+
return self.value
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def convert_to_tensor(x, dtype=None, sparse=True):
|
| 50 |
+
if dtype is not None:
|
| 51 |
+
dtype = standardize_dtype(dtype)
|
| 52 |
+
if isinstance(x, (jnp.ndarray, jax.Array)) and (
|
| 53 |
+
dtype is None or x.dtype == dtype
|
| 54 |
+
):
|
| 55 |
+
# Skip the conversion early if the instance is already a JAX array.
|
| 56 |
+
# This is important in the multi-process context since jax.array(x) for
|
| 57 |
+
# an existing distributed jax array will raise error.
|
| 58 |
+
return x
|
| 59 |
+
|
| 60 |
+
if isinstance(x, Variable):
|
| 61 |
+
if dtype is not None and x.dtype != dtype:
|
| 62 |
+
return x.value.astype(dtype)
|
| 63 |
+
return x.value
|
| 64 |
+
|
| 65 |
+
if isinstance(x, jax_sparse.JAXSparse):
|
| 66 |
+
if sparse is not None and not sparse:
|
| 67 |
+
x = x.todense()
|
| 68 |
+
elif dtype is not None and x.dtype != dtype:
|
| 69 |
+
return x.astype(dtype)
|
| 70 |
+
else:
|
| 71 |
+
return x
|
| 72 |
+
|
| 73 |
+
if not is_tensor(x) and standardize_dtype(dtype) == "bfloat16":
|
| 74 |
+
# Can't create bfloat16 arrays on the fly (e.g. from a h5 Dataset).
|
| 75 |
+
# Instead we convert "as is" (to stored dtype) and cast.
|
| 76 |
+
return jnp.asarray(x).astype(dtype)
|
| 77 |
+
return jnp.asarray(x, dtype=dtype)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def convert_to_numpy(x):
|
| 81 |
+
if isinstance(x, jax_sparse.JAXSparse):
|
| 82 |
+
x = x.todense()
|
| 83 |
+
if is_tensor(x) and x.dtype == "bfloat16":
|
| 84 |
+
return np.array(x, dtype=ml_dtypes.bfloat16)
|
| 85 |
+
return np.array(x)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def is_tensor(x):
|
| 89 |
+
if isinstance(x, (jnp.ndarray, jax_sparse.JAXSparse)):
|
| 90 |
+
return True
|
| 91 |
+
return False
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def shape(x):
|
| 95 |
+
# This will work as long as we disallow
|
| 96 |
+
# dynamic shapes in JAX.
|
| 97 |
+
return x.shape
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def cast(x, dtype):
|
| 101 |
+
return convert_to_tensor(x, dtype=dtype)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# Shape / dtype / sparseness inference util
|
| 105 |
+
def compute_output_spec(fn, *args, **kwargs):
|
| 106 |
+
with StatelessScope(), SymbolicScope():
|
| 107 |
+
built_in_types = (type(None), int, float, str, bool, complex, bytes)
|
| 108 |
+
|
| 109 |
+
# First, separate symbolic args from other args
|
| 110 |
+
static_args_idx = []
|
| 111 |
+
static_args = []
|
| 112 |
+
maybe_symbolic_args = []
|
| 113 |
+
static_kwargs = {}
|
| 114 |
+
maybe_symbolic_kwargs = {}
|
| 115 |
+
for idx, arg in enumerate(args):
|
| 116 |
+
if isinstance(arg, built_in_types):
|
| 117 |
+
static_args_idx.append(idx)
|
| 118 |
+
static_args.append(arg)
|
| 119 |
+
else:
|
| 120 |
+
maybe_symbolic_args.append(arg)
|
| 121 |
+
maybe_symbolic_args = tuple(maybe_symbolic_args)
|
| 122 |
+
for k, v in kwargs.items():
|
| 123 |
+
if isinstance(v, built_in_types):
|
| 124 |
+
static_kwargs[k] = v
|
| 125 |
+
else:
|
| 126 |
+
maybe_symbolic_kwargs[k] = v
|
| 127 |
+
|
| 128 |
+
# Second, find out if there are dynamic shapes
|
| 129 |
+
has_none = False
|
| 130 |
+
for x in tree.flatten((maybe_symbolic_args, maybe_symbolic_kwargs)):
|
| 131 |
+
if isinstance(x, KerasTensor) and any(d is None for d in x.shape):
|
| 132 |
+
has_none = True
|
| 133 |
+
|
| 134 |
+
def convert_keras_tensor_to_jax(x, fill_value=None):
|
| 135 |
+
if isinstance(x, KerasTensor):
|
| 136 |
+
shape = list(x.shape)
|
| 137 |
+
if fill_value:
|
| 138 |
+
for i, e in enumerate(shape):
|
| 139 |
+
if e is None:
|
| 140 |
+
shape[i] = fill_value
|
| 141 |
+
jax_tensor = jax.ShapeDtypeStruct(shape, dtype=x.dtype)
|
| 142 |
+
return jax_tensor
|
| 143 |
+
if isinstance(x, dict):
|
| 144 |
+
return {
|
| 145 |
+
k: convert_keras_tensor_to_jax(v, fill_value=fill_value)
|
| 146 |
+
for k, v in x.items()
|
| 147 |
+
}
|
| 148 |
+
if isinstance(x, list):
|
| 149 |
+
return [
|
| 150 |
+
convert_keras_tensor_to_jax(xi, fill_value=fill_value)
|
| 151 |
+
for xi in x
|
| 152 |
+
]
|
| 153 |
+
return x
|
| 154 |
+
|
| 155 |
+
def wrapped_fn(*args, **kwargs):
|
| 156 |
+
# Turn inputs that are sparse to BCOO tensors
|
| 157 |
+
def to_bcoo_if_sparse(x, maybe_symbolic_x):
|
| 158 |
+
if (
|
| 159 |
+
isinstance(maybe_symbolic_x, KerasTensor)
|
| 160 |
+
and maybe_symbolic_x.sparse
|
| 161 |
+
):
|
| 162 |
+
return jax_sparse.BCOO.fromdense(x, nse=1)
|
| 163 |
+
return x
|
| 164 |
+
|
| 165 |
+
args, kwargs = tree.map_structure(
|
| 166 |
+
to_bcoo_if_sparse,
|
| 167 |
+
(args, kwargs),
|
| 168 |
+
(maybe_symbolic_args, maybe_symbolic_kwargs),
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
rec_args = []
|
| 172 |
+
idx_static = 0
|
| 173 |
+
idx_sym = 0
|
| 174 |
+
i = 0
|
| 175 |
+
while idx_static < len(static_args) or idx_sym < len(args):
|
| 176 |
+
if i in static_args_idx:
|
| 177 |
+
rec_args.append(static_args[idx_static])
|
| 178 |
+
idx_static += 1
|
| 179 |
+
else:
|
| 180 |
+
rec_args.append(args[idx_sym])
|
| 181 |
+
idx_sym += 1
|
| 182 |
+
|
| 183 |
+
i += 1
|
| 184 |
+
with StatelessScope():
|
| 185 |
+
return fn(*rec_args, **kwargs, **static_kwargs)
|
| 186 |
+
|
| 187 |
+
if has_none:
|
| 188 |
+
ms_args_1, ms_kwargs_1 = tree.map_structure(
|
| 189 |
+
lambda x: convert_keras_tensor_to_jax(x, fill_value=83),
|
| 190 |
+
(maybe_symbolic_args, maybe_symbolic_kwargs),
|
| 191 |
+
)
|
| 192 |
+
_, jax_out_1 = jax.make_jaxpr(wrapped_fn, return_shape=True)(
|
| 193 |
+
*ms_args_1, **ms_kwargs_1
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
ms_args_2, ms_kwargs_2 = tree.map_structure(
|
| 197 |
+
lambda x: convert_keras_tensor_to_jax(x, fill_value=89),
|
| 198 |
+
(maybe_symbolic_args, maybe_symbolic_kwargs),
|
| 199 |
+
)
|
| 200 |
+
_, jax_out_2 = jax.make_jaxpr(wrapped_fn, return_shape=True)(
|
| 201 |
+
*ms_args_2, **ms_kwargs_2
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
def merge_shapes(shape1, shape2):
|
| 205 |
+
return tuple(
|
| 206 |
+
[d1 if d1 == d2 else None for d1, d2 in zip(shape1, shape2)]
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
def convert_jax_specs_to_keras_tensor(x1, x2):
|
| 210 |
+
if isinstance(x1, jax.ShapeDtypeStruct):
|
| 211 |
+
if not isinstance(x2, jax.ShapeDtypeStruct):
|
| 212 |
+
raise ValueError("Indeterministic output ordering.")
|
| 213 |
+
return KerasTensor(
|
| 214 |
+
merge_shapes(x1.shape, x2.shape), dtype=x1.dtype
|
| 215 |
+
)
|
| 216 |
+
elif isinstance(x1, jax_sparse.BCOO):
|
| 217 |
+
if not isinstance(x2, jax_sparse.BCOO):
|
| 218 |
+
raise ValueError("Indeterministic output ordering.")
|
| 219 |
+
return KerasTensor(
|
| 220 |
+
merge_shapes(x1.shape, x2.shape),
|
| 221 |
+
dtype=x1.dtype,
|
| 222 |
+
sparse=True,
|
| 223 |
+
)
|
| 224 |
+
else:
|
| 225 |
+
return x1
|
| 226 |
+
|
| 227 |
+
return tree.map_structure(
|
| 228 |
+
convert_jax_specs_to_keras_tensor, jax_out_1, jax_out_2
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
maybe_symbolic_args, maybe_symbolic_kwargs = tree.map_structure(
|
| 232 |
+
convert_keras_tensor_to_jax,
|
| 233 |
+
(maybe_symbolic_args, maybe_symbolic_kwargs),
|
| 234 |
+
)
|
| 235 |
+
_, jax_out = jax.make_jaxpr(wrapped_fn, return_shape=True)(
|
| 236 |
+
*maybe_symbolic_args, **maybe_symbolic_kwargs
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
def convert_jax_spec_to_keras_tensor(x):
|
| 240 |
+
if isinstance(x, jax.ShapeDtypeStruct):
|
| 241 |
+
return KerasTensor(x.shape, x.dtype)
|
| 242 |
+
elif isinstance(x, jax_sparse.BCOO):
|
| 243 |
+
return KerasTensor(x.shape, x.dtype, sparse=True)
|
| 244 |
+
return x
|
| 245 |
+
|
| 246 |
+
return tree.map_structure(convert_jax_spec_to_keras_tensor, jax_out)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def cond(pred, true_fn, false_fn):
|
| 250 |
+
return jax.lax.cond(pred, true_fun=true_fn, false_fun=false_fn)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def vectorized_map(function, elements):
|
| 254 |
+
return jax.vmap(function)(elements)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def map(f, xs):
|
| 258 |
+
return jax.lax.map(f, xs)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over leading array axes while carrying along state.

    Thin wrapper around `jax.lax.scan` that validates `unroll` first.

    Args:
        f: callable with signature `(carry, x) -> (carry, y)`.
        init: initial carry value.
        xs: values to scan over along the leading axis, or `None`.
        length: number of iterations when `xs` is `None`.
        reverse: whether to scan in reverse order.
        unroll: positive int (iterations to unroll per step) or bool.

    Returns:
        Tuple `(final_carry, stacked_outputs)`.

    Raises:
        ValueError: if `unroll` is neither a bool nor a positive int.
    """
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            # Fixed grammar in the error message ("an positive" -> "a
            # positive").
            raise ValueError(
                "`unroll` must be a positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    return jax.lax.scan(
        f, init=init, xs=xs, length=length, reverse=reverse, unroll=unroll
    )
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def associative_scan(f, elems, reverse=False, axis=0):
    """Parallel prefix scan of the associative operator `f` over `elems`."""
    return jax.lax.associative_scan(f, elems, reverse=reverse, axis=axis)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def scatter(indices, values, shape):
    """Scatter-add `values` into a zero tensor of `shape` at `indices`.

    `indices` has shape `(..., index_depth)`; duplicate indices accumulate.
    """
    target = jnp.zeros(shape, values.dtype)
    # Turn the trailing index-depth axis into a tuple of per-dimension
    # index arrays, as required by `.at[...]` advanced indexing.
    index_tuple = tuple(jnp.moveaxis(indices, -1, 0))
    return target.at[index_tuple].add(values)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def scatter_update(inputs, indices, updates):
    """Return a copy of `inputs` with `updates` written at `indices`."""
    tensor = convert_to_tensor(inputs)
    # Columns of the transposed index array address one dimension each.
    index_cols = jnp.transpose(jnp.array(indices))
    return tensor.at[tuple(index_cols)].set(updates)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def slice(inputs, start_indices, shape):
    """Extract a slice of size `shape` starting at `start_indices`.

    Note: intentionally shadows the builtin `slice` — backend API contract.
    """
    return jax.lax.dynamic_slice(inputs, start_indices, shape)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def slice_update(inputs, start_indices, updates):
    """Return `inputs` with `updates` overwritten at `start_indices`."""
    return jax.lax.dynamic_update_slice(inputs, updates, start_indices)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def switch(index, branches, *operands):
    """Apply `branches[index]` to `operands` (index is clamped by JAX)."""
    return jax.lax.switch(index, branches, *operands)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Repeatedly run `body` while `cond` holds, via `jax.lax.while_loop`.

    `cond` and `body` receive the loop variables unpacked as positional
    arguments. When `maximum_iterations` is set, an iteration counter is
    appended to the carried state and ANDed into the loop condition so the
    loop stops after at most that many steps.

    Args:
        cond: callable returning a scalar boolean, called as `cond(*vars)`.
        body: callable returning the updated loop variables, `body(*vars)`.
        loop_vars: initial loop variables; a tuple/list or a single value.
        maximum_iterations: optional hard cap on the iteration count.

    Returns:
        The final loop variables, matching the structure of `loop_vars`
        (a single value if a single value was passed in).
    """
    # Remember whether the caller passed a sequence so the result can be
    # unwrapped (or not) to match on the way out.
    is_tuple = isinstance(loop_vars, (tuple, list))
    loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,)
    if maximum_iterations is not None:
        current_iter = 0
        loop_vars = loop_vars + (current_iter,)

        # Unpack list/tuple args. The last argument is `current_iter`.
        def _cond(args):
            # `&` (not `and`) so the condition stays traceable by JAX.
            return cond(*args[:-1]) & (args[-1] < maximum_iterations)

        def _body(args):
            outputs = body(*args[:-1])
            outputs = tuple(outputs) if is_tuple else (outputs,)
            return outputs + (args[-1] + 1,)

    else:

        def _cond(args):
            return cond(*args)

        def _body(args):
            outputs = body(*args)
            return tuple(outputs) if is_tuple else (outputs,)

    outputs = jax.lax.while_loop(_cond, _body, loop_vars)
    if maximum_iterations is not None:
        # Drop the internal iteration counter before returning.
        outputs = outputs[:-1]
    return outputs if is_tuple else outputs[0]
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def fori_loop(lower, upper, body_fun, init_val):
    """Run `body_fun(i, val)` for `i` in `[lower, upper)`, carrying `val`."""
    return jax.lax.fori_loop(lower, upper, body_fun, init_val)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def stop_gradient(variable):
    """Return `variable` with gradients blocked from flowing through it.

    Keras `Variable` instances are unwrapped to their backing value first.
    """
    if isinstance(variable, Variable):
        variable = variable.value
    return jax.lax.stop_gradient(variable)
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def unstack(x, num=None, axis=0):
    """Split `x` into a list of slices along `axis` (one per index).

    `num` is accepted for API compatibility; the slice count comes from
    `x.shape[axis]`.
    """
    pieces = []
    for i in range(x.shape[axis]):
        pieces.append(jax.lax.index_in_dim(x, i, axis, keepdims=False))
    return pieces
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def random_seed_dtype():
    """Return the dtype used for RNG seeds (JAX PRNG keys are uint32)."""
    return "uint32"
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def custom_gradient(fun):
    """Wrap `fun` (returning `(primal, grad_fn)`) via `jax.custom_gradient`."""
    return jax.custom_gradient(fun=fun)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class name_scope(base_name_scope):
    """Name scope that mirrors the Keras scope onto `jax.named_scope`."""

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        # Companion JAX scope; entered/exited in lockstep with this one.
        self._jax_name_scope = jax.named_scope(name)

    def __enter__(self):
        name_scope_stack = global_state.get_global_attribute(
            "name_scope_stack", default=[], set_to_default=True
        )
        if self.deduplicate and name_scope_stack:
            parent_caller = name_scope_stack[-1].caller
            parent_name = name_scope_stack[-1].name
            # Re-entering the same (caller, name) pair is a no-op: do not
            # push a duplicate scope (and leave _pop_on_exit unset so
            # __exit__ skips the JAX scope too).
            if (
                self.caller is not None
                and self.caller is parent_caller
                and self.name == parent_name
            ):
                return self
        name_scope_stack.append(self)
        self._pop_on_exit = True
        self._jax_name_scope.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        super().__exit__(*args, **kwargs)
        # Only exit the JAX scope if we actually entered it in __enter__.
        if self._pop_on_exit:
            self._jax_name_scope.__exit__(*args, **kwargs)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def device_scope(device_name):
    """Return a context manager placing computations on the given device.

    Args:
        device_name: either a string such as `"cpu:0"` / `"gpu:1"`
            (case-insensitive) or a `jax.Device` instance.

    Returns:
        A `jax.default_device` context manager.

    Raises:
        ValueError: if `device_name` is neither a string nor a
            `jax.Device`.
    """
    if isinstance(device_name, str):
        # We support string value like "cpu:0", "gpu:1", etc.
        jax_device = distribution_lib._to_jax_device(device_name.lower())
    elif not isinstance(device_name, jax.Device):
        raise ValueError(
            "Invalid value for argument `device_name`. "
            "Expected a string like 'gpu:0' or a `jax.Device` instance. "
            f"Received: device_name='{device_name}'"
        )
    else:
        jax_device = device_name
    return jax.default_device(jax_device)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/distribution_lib.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities for distribution strategy with JAX backend."""
|
| 2 |
+
|
| 3 |
+
import jax
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from keras.src.utils import jax_utils
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def list_devices(device_type=None):
    """Return all the available devices based on the device type.

    Note that this should return the global devices in a distributed setting.

    Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to `"gpu"`
            or `"tpu"` if available when device_type is not provided. Otherwise
            will return the `"cpu"` devices.

    Return:
        List of devices that are available for distribute computation.
    """
    backend = device_type.lower() if device_type else None
    return [
        f"{device.platform}:{device.id}"
        for device in jax.devices(backend=backend)
    ]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def distribute_variable(value, layout):
    """Create a distributed variable for JAX.

    Since JAX doesn't have a variable class, this will just return a `jax.Array`
    with the corresponding layout/sharding specified.

    Note that this function should be used in eager context, not in jitted
    function.

    Args:
        value: the initial value of the variable.
        layout: `TensorLayout` for the created variable, or a
            `jax.sharding.Sharding` instance.

    Returns:
        jax.Array which is the distributed variable.
    """
    if not isinstance(layout, jax.sharding.Sharding):
        layout = _to_jax_layout(layout)
    if isinstance(
        value, (jax.Array, jax.numpy.ndarray)
    ) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)):
        # Skip the relayout if the value is already having the proper sharding
        return value

    if layout.is_fully_addressable:
        # Single-host (or fully local) case: one device_put does the job.
        return jax.device_put(value, layout)
    else:
        # Need to only distribute the value to local addressable devices, and
        # repack them back into global format.
        mapping = layout.addressable_devices_indices_map(value.shape)
        local_values = jax.device_put(
            [value[i] for i in mapping.values()], list(mapping.keys())
        )
        global_value = jax.make_array_from_single_device_arrays(
            value.shape, layout, local_values
        )
        return global_value
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def distribute_tensor(tensor, layout):
    """Distribute the tensor based on the layout.

    Note that this function can be used both in eager context, or within a
    jitted function.

    Args:
        tensor: `jax.Array` that need to be distributed.
        layout: `TensorLayout` for the distribution information, or a
            `jax.sharding.Sharding` instance.

    Returns:
        Distributed value.
    """
    if not isinstance(layout, jax.sharding.Sharding):
        layout = _to_jax_layout(layout)
    # TODO(scottzhu): This might not be a cheap check, we should consider
    # have some proper JAX API for doing this check.
    if jax_utils.is_in_jax_tracing_scope():
        # Inside jit/tracing, device_put is not allowed; constrain instead.
        return jax.lax.with_sharding_constraint(tensor, layout)

    if layout.is_fully_addressable:
        return jax.device_put(tensor, layout)
    else:
        # Need to only distribute the value to local addressable devices, and
        # repack them back into global format.
        mapping = layout.addressable_devices_indices_map(tensor.shape)
        local_values = jax.device_put(
            [tensor[i] for i in mapping.values()], list(mapping.keys())
        )
        global_value = jax.make_array_from_single_device_arrays(
            tensor.shape, layout, local_values
        )
        return global_value
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def distribute_data_input(per_process_batch, layout, batch_dim_name):
    """Distribute the input data with the corresponding layout.

    Note that the inputs here is a local worker batch. Within the local worker,
    the data need to be further partitioned to map to the each of the devices.

    Args:
        inputs: `jax.Array` that is already sharded to a local process size.
        layout: `TensorLayout` for the distribution information, or a
            `jax.sharding.Sharding` instance.

    Returns:
        A global batch distributed according to `layout`.
    """
    if not isinstance(layout, jax.sharding.Sharding):
        layout = _to_jax_layout(layout)

    num_model_replicas_total = layout.mesh.shape[batch_dim_name]

    # Product of all non-batch mesh axes = devices per model replica.
    mesh_model_dim_size = 1
    for name, dim_size in layout.mesh.shape.items():
        if not name == batch_dim_name:
            mesh_model_dim_size *= dim_size

    # NOTE(review): true division yields a float here, which is later
    # passed to `np.split` — presumably always a whole number in valid
    # configurations; confirm against callers.
    num_model_replicas_per_process = num_model_replicas_total / num_processes()
    per_process_batch_size = per_process_batch.shape[0]

    # NOTE(review): the comment below says "more than one", but the branch
    # is taken for exactly one replica per process as well — confirm intent.
    if num_model_replicas_per_process >= 1:
        # If there is more than one model replica per process, we need to
        # further shard the data to each of the model replicas.
        if num_model_replicas_total % num_processes() != 0:
            raise ValueError(
                "If there is more than one replica per process, the batch "
                "dimension of the mesh should be divisible "
                "by the number of processes. Here, "
                f"batch dimension = {num_model_replicas_total}, while "
                f"number of processes = {num_processes()}"
            )

        per_replica_batch_size = int(
            per_process_batch_size // num_model_replicas_per_process
        )
        if per_process_batch_size % per_replica_batch_size != 0:
            raise ValueError(
                "`per_process_batch_size` should be divisible by `"
                "per_replica_batch_size`. "
                f"per_process_batch_size={per_process_batch_size} and "
                f"per_replica_batch_size = {per_replica_batch_size}"
            )
        per_replica_batches = np.split(
            per_process_batch, num_model_replicas_per_process
        )
        # Replicate data along the model_dim.
        per_device_batches = [
            per_replica_batch
            for per_replica_batch in per_replica_batches
            for _ in range(mesh_model_dim_size)
        ]
        batches_on_devices = [
            jax.device_put(batch, device)
            for batch, device in zip(
                per_device_batches, layout.addressable_devices
            )
        ]
    else:
        # If there are less than one model replicas per process, we need to
        # replicate the data to each of the model replicas. No further data
        # sharding is needed.
        per_replica_batch_size = per_process_batch_size
        batches_on_devices = [
            jax.device_put(per_process_batch, device)
            for device in layout.addressable_devices
        ]

    global_batch_size = per_replica_batch_size * num_model_replicas_total
    global_batch_shape = (global_batch_size,) + per_process_batch.shape[1:]
    global_batch_array = jax.make_array_from_single_device_arrays(
        shape=global_batch_shape,
        sharding=layout,
        arrays=batches_on_devices,
    )

    return global_batch_array
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def initialize(job_addresses, num_processes, process_id):
    """Initialize `jax.distributed` for multi-process execution.

    Args:
        job_addresses: either the coordinator address alone, or a
            comma-separated list of all job addresses (the first entry is
            treated as the coordinator).
        num_processes: total number of processes, or `None`.
        process_id: index of the current process.

    Raises:
        ValueError: if a comma-separated `job_addresses` list disagrees in
            length with `num_processes`.
    """
    if job_addresses and "," in job_addresses:
        # When user provide all the job addresses, we will split and get the
        # first one, which is the coordinator.
        job_addresses = job_addresses.split(",")
        # Do a sanity check to make sure the number of addresses also match
        # the num_processes.
        if num_processes is not None and num_processes != len(job_addresses):
            raise ValueError(
                f"The provided job_addresses {job_addresses} has "
                f"{len(job_addresses)} jobs, but num_processes is "
                f"{num_processes}"
            )
        coordinator_address = job_addresses[0]
    else:
        coordinator_address = job_addresses

    jax.distributed.initialize(
        coordinator_address=coordinator_address,
        num_processes=num_processes,
        process_id=process_id,
    )
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def num_processes():
    """Return the number of processes for the current distribution setting."""
    return jax.process_count()
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def process_id():
    """Return the current process ID for the distribution setting."""
    return jax.process_index()
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _to_jax_device(device_name):
|
| 223 |
+
if isinstance(device_name, jax.Device):
|
| 224 |
+
return device_name
|
| 225 |
+
device_type, device_id = device_name.split(":")
|
| 226 |
+
|
| 227 |
+
devices = jax.devices(backend=device_type)
|
| 228 |
+
for device in devices:
|
| 229 |
+
if device.platform == device_type and device.id == int(device_id):
|
| 230 |
+
return device
|
| 231 |
+
raise ValueError(f"Device not found: {device_name}")
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _to_jax_mesh(device_mesh):
    """Convert the DeviceMesh to JAX backend specific Mesh.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `jax.sharding.Mesh` instance.
    """
    shape = device_mesh.devices.shape
    # Resolve every entry to a concrete jax.Device, then restore the
    # original mesh shape.
    devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()]
    devices = np.array(devices).reshape(shape)
    return jax.sharding.Mesh(devices, device_mesh.axis_names)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def _to_jax_layout(tensor_layout):
    """Convert the TensorLayout to JAX backend specific Sharding.

    Args:
        tensor_layout: TensorLayout instance to convert.

    Returns:
        A `jax.sharding.NamedSharding` instance.

    Raises:
        ValueError: if the TensorLayout has no device mesh attached.
    """
    if tensor_layout.device_mesh is None:
        raise ValueError(
            "Cannot create sharding when device mesh is not set "
            "for TensorLayout."
        )
    partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)
    jax_mesh = _to_jax_mesh(tensor_layout.device_mesh)
    return jax.sharding.NamedSharding(jax_mesh, partition_spec)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/linalg.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import jax
|
| 2 |
+
import jax.numpy as jnp
|
| 3 |
+
import jax.scipy as jsp
|
| 4 |
+
|
| 5 |
+
from keras.src.backend import config
|
| 6 |
+
from keras.src.backend import standardize_dtype
|
| 7 |
+
from keras.src.backend.common import dtypes
|
| 8 |
+
from keras.src.backend.jax.core import cast
|
| 9 |
+
from keras.src.backend.jax.core import convert_to_tensor
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def cholesky(a):
    """Cholesky decomposition of `a`, raising on failure in eager mode.

    Raises:
        ValueError: (eager only) if the result contains NaN, which signals
            that `a` was not positive definite.
    """
    result = jnp.linalg.cholesky(a)
    try:
        # In eager mode, raise for nan to
        # achieve behavior consistency with numpy
        if jnp.any(jnp.isnan(result)):
            raise ValueError(
                "Cholesky decomposition failed. "
                "The input might not be a valid "
                "positive definite matrix."
            )
    except jax.errors.TracerBoolConversionError:
        # Cannot raise for nan in tracing mode
        pass
    return result
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def det(a):
    """Determinant of the (batched) square matrix `a`."""
    return jnp.linalg.det(a)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def eig(x):
    """Eigenvalues and right eigenvectors of a general matrix `x`."""
    return jnp.linalg.eig(x)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def eigh(x):
    """Eigenvalues and eigenvectors of a Hermitian/symmetric matrix `x`."""
    return jnp.linalg.eigh(x)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def inv(a):
    """Multiplicative inverse of the (batched) square matrix `a`."""
    return jnp.linalg.inv(a)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def lu_factor(x):
    """LU factorization of `x` via `jax.scipy.linalg.lu_factor`.

    Batched inputs (ndim > 2) are handled by wrapping the factorization in
    one `jax.vmap` per leading batch dimension.
    """
    lu_factor_fn = jsp.linalg.lu_factor
    if x.ndim > 2:
        for i in range(x.ndim - 2):
            lu_factor_fn = jax.vmap(lu_factor_fn)

    return lu_factor_fn(x)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm of `x`, upcast to a float dtype first."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        # int64 is mapped to the configured default float type rather than
        # the result_type promotion used for everything else.
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def qr(x, mode="reduced"):
    """QR decomposition of `x`.

    Args:
        x: input tensor.
        mode: `"reduced"` or `"complete"`.

    Raises:
        ValueError: for any other `mode` value.
    """
    allowed_modes = {"reduced", "complete"}
    if mode not in allowed_modes:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return jnp.linalg.qr(x, mode=mode)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def solve(a, b):
    """Solve the linear system `a @ x = b` for `x`."""
    return jnp.linalg.solve(a, b)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def solve_triangular(a, b, lower=False):
    """Solve `a @ x = b` where `a` is triangular (`lower` selects which)."""
    return jsp.linalg.solve_triangular(a, b, lower=lower)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def svd(x, full_matrices=True, compute_uv=True):
    """Singular value decomposition of `x`."""
    return jnp.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def lstsq(a, b, rcond=None):
    """Least-squares solution to `a @ x = b` (solution only, no residuals)."""
    lhs = convert_to_tensor(a)
    rhs = convert_to_tensor(b)
    # jnp.linalg.lstsq returns (solution, residuals, rank, singular_values);
    # only the solution is exposed.
    return jnp.linalg.lstsq(lhs, rhs, rcond=rcond)[0]
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/math.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import jax
|
| 4 |
+
import jax.numpy as jnp
|
| 5 |
+
|
| 6 |
+
from keras.src.backend import config
|
| 7 |
+
from keras.src.backend import standardize_dtype
|
| 8 |
+
from keras.src.backend.common import dtypes
|
| 9 |
+
from keras.src.backend.jax.core import cast
|
| 10 |
+
from keras.src.backend.jax.core import convert_to_tensor
|
| 11 |
+
from keras.src.utils.module_utils import scipy
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Sum `data` entries grouped by `segment_ids`.

    Raises:
        ValueError: if `num_segments` is not provided (required by JAX).
    """
    if num_segments is None:
        raise ValueError(
            "Argument `num_segments` must be set when using the JAX backend. "
            "Received: num_segments=None"
        )
    return jax.ops.segment_sum(
        data, segment_ids, num_segments, indices_are_sorted=sorted
    )
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Maximum of `data` entries grouped by `segment_ids`.

    Raises:
        ValueError: if `num_segments` is not provided (required by JAX).
    """
    if num_segments is None:
        raise ValueError(
            "Argument `num_segments` must be set when using the JAX backend. "
            "Received: num_segments=None"
        )
    return jax.ops.segment_max(
        data, segment_ids, num_segments, indices_are_sorted=sorted
    )
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def top_k(x, k, sorted=True):
    """Return the `k` largest values (and their indices) along the last axis.

    JAX's `top_k` has no `sorted` flag; when `sorted=False` the ordering is
    unspecified anyway, so returning sorted output is still valid.
    """
    return jax.lax.top_k(x, k)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def in_top_k(targets, predictions, k):
    """Whether each target label ranks within the top-`k` predictions."""
    target_scores = jnp.take_along_axis(
        predictions, jnp.expand_dims(targets, axis=-1), axis=-1
    )
    # `nan` shouldn't be considered as large probability.
    target_scores = jnp.where(
        jnp.isnan(target_scores), -jnp.inf, target_scores
    )
    # Rank = 1 + number of strictly larger predictions.
    ranks = 1 + jnp.sum(jnp.greater(predictions, target_scores), axis=-1)
    return jnp.less_equal(ranks, k)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def logsumexp(x, axis=None, keepdims=False):
    """Numerically stable `log(sum(exp(x)))` along `axis`."""
    return jax.scipy.special.logsumexp(x, axis=axis, keepdims=keepdims)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def qr(x, mode="reduced"):
    """QR decomposition of `x` (`mode` is `"reduced"` or `"complete"`).

    Raises:
        ValueError: for any other `mode` value.
    """
    valid_modes = {"reduced", "complete"}
    if mode not in valid_modes:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return jnp.linalg.qr(x, mode=mode)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def extract_sequences(x, sequence_length, sequence_stride):
    """Extract sliding windows of `sequence_length` from the last axis.

    Windows start every `sequence_stride` samples; "VALID" framing means
    only complete windows are produced.
    """
    *batch_shape, signal_length = x.shape
    batch_shape = list(batch_shape)
    # Collapse all batch dims so the patch extraction sees (N, T, C=1).
    x = jnp.reshape(x, (math.prod(batch_shape), signal_length, 1))
    x = jax.lax.conv_general_dilated_patches(
        x,
        (sequence_length,),
        (sequence_stride,),
        "VALID",
        dimension_numbers=("NTC", "OIT", "NTC"),
    )
    # Restore batch dims; trailing axes are (num_frames, sequence_length).
    return jnp.reshape(x, (*batch_shape, *x.shape[-2:]))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _get_complex_tensor_from_tuple(x):
|
| 83 |
+
if not isinstance(x, (tuple, list)) or len(x) != 2:
|
| 84 |
+
raise ValueError(
|
| 85 |
+
"Input `x` should be a tuple of two tensors - real and imaginary."
|
| 86 |
+
f"Received: x={x}"
|
| 87 |
+
)
|
| 88 |
+
# `convert_to_tensor` does not support passing complex tensors. We separate
|
| 89 |
+
# the input out into real and imaginary and convert them separately.
|
| 90 |
+
real, imag = x
|
| 91 |
+
# Check shapes.
|
| 92 |
+
if real.shape != imag.shape:
|
| 93 |
+
raise ValueError(
|
| 94 |
+
"Input `x` should be a tuple of two tensors - real and imaginary."
|
| 95 |
+
"Both the real and imaginary parts should have the same shape. "
|
| 96 |
+
f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
|
| 97 |
+
)
|
| 98 |
+
# Ensure dtype is float.
|
| 99 |
+
if not jnp.issubdtype(real.dtype, jnp.floating) or not jnp.issubdtype(
|
| 100 |
+
imag.dtype, jnp.floating
|
| 101 |
+
):
|
| 102 |
+
raise ValueError(
|
| 103 |
+
"At least one tensor in input `x` is not of type float."
|
| 104 |
+
f"Received: x={x}."
|
| 105 |
+
)
|
| 106 |
+
complex_input = jax.lax.complex(real, imag)
|
| 107 |
+
return complex_input
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def fft(x):
    """1D FFT of a `(real, imag)` pair; returns `(real, imag)` parts."""
    signal = _get_complex_tensor_from_tuple(x)
    transformed = jnp.fft.fft(signal)
    return jnp.real(transformed), jnp.imag(transformed)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def fft2(x):
    """2D FFT of a `(real, imag)` pair; returns `(real, imag)` parts."""
    signal = _get_complex_tensor_from_tuple(x)
    transformed = jnp.fft.fft2(signal)
    return jnp.real(transformed), jnp.imag(transformed)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def ifft2(x):
    """Inverse 2D FFT of a `(real, imag)` pair; returns `(real, imag)`."""
    signal = _get_complex_tensor_from_tuple(x)
    transformed = jnp.fft.ifft2(signal)
    return jnp.real(transformed), jnp.imag(transformed)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def rfft(x, fft_length=None):
    """FFT of a real signal; returns `(real, imag)` of the half-spectrum."""
    spectrum = jnp.fft.rfft(x, n=fft_length, axis=-1, norm="backward")
    return jnp.real(spectrum), jnp.imag(spectrum)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def irfft(x, fft_length=None):
    """Inverse of `rfft`: real signal from a `(real, imag)` half-spectrum."""
    spectrum = _get_complex_tensor_from_tuple(x)
    return jnp.fft.irfft(spectrum, n=fft_length, axis=-1, norm="backward")
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-time Fourier transform of `x`; returns `(real, imag)` parts.

    Args:
        x: real float32/float64 signal tensor, time on the last axis.
        sequence_length: analysis window length in samples.
        sequence_stride: hop between successive windows.
        fft_length: FFT size; must be >= `sequence_length`.
        window: `"hann"`, `"hamming"`, a window tensor of shape
            `[sequence_length]`, or `None` for a rectangular window.
        center: whether to reflect-pad the signal by `fft_length // 2` on
            both sides so frames are centered.

    Returns:
        Tuple of real and imaginary parts, shaped
        `(..., num_sequences, fft_bins)`.

    Raises:
        TypeError: if `x` is not float32/float64.
        ValueError: on bad `fft_length` or `window`.
    """
    if standardize_dtype(x.dtype) not in {"float32", "float64"}:
        raise TypeError(
            "Invalid input type. Expected `float32` or `float64`. "
            f"Received: input type={x.dtype}"
        )
    if fft_length < sequence_length:
        raise ValueError(
            "`fft_length` must equal or larger than `sequence_length`. "
            f"Received: sequence_length={sequence_length}, "
            f"fft_length={fft_length}"
        )
    if isinstance(window, str):
        if window not in {"hann", "hamming"}:
            raise ValueError(
                "If a string is passed to `window`, it must be one of "
                f'`"hann"`, `"hamming"`. Received: window={window}'
            )
    x = convert_to_tensor(x)

    if center:
        pad_width = [(0, 0) for _ in range(len(x.shape))]
        pad_width[-1] = (fft_length // 2, fft_length // 2)
        x = jnp.pad(x, pad_width, mode="reflect")

    # The window is centered inside an fft_length-sized frame; l_pad/r_pad
    # are the zero margins on each side.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad

    if window is not None:
        if isinstance(window, str):
            win = convert_to_tensor(
                scipy.signal.get_window(window, sequence_length), dtype=x.dtype
            )
        else:
            win = convert_to_tensor(window, dtype=x.dtype)
        if len(win.shape) != 1 or win.shape[-1] != sequence_length:
            raise ValueError(
                "The shape of `window` must be equal to [sequence_length]."
                f"Received: window shape={win.shape}"
            )
        win = jnp.pad(win, [[l_pad, r_pad]])
    else:
        # Rectangular window covering the whole padded frame.
        win = jnp.ones((sequence_length + l_pad + r_pad), dtype=x.dtype)

    result = jax.scipy.signal.stft(
        x,
        fs=1.0,
        window=win,
        nperseg=(sequence_length + l_pad + r_pad),
        noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
        nfft=fft_length,
        boundary=None,
        padded=False,
    )[-1]
    # scale and swap to (..., num_sequences, fft_bins)
    # NOTE(review): dividing by sqrt(1 / win.sum()**2) multiplies by
    # win.sum() — presumably undoing scipy's window normalization; confirm
    # against the other backends' stft scaling.
    scale = jnp.sqrt(1.0 / win.sum() ** 2)
    result = result / scale
    result = jnp.swapaxes(result, -2, -1)
    return jnp.real(result), jnp.imag(result)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse short-time Fourier transform.

    Args:
        x: tuple of two real tensors `(real, imag)` holding STFT
            coefficients, frames on axis -2 and FFT bins on axis -1
            (combined into one complex tensor by
            `_get_complex_tensor_from_tuple`).
        sequence_length: window length used by the forward STFT.
        sequence_stride: hop size between successive frames.
        fft_length: FFT size used by the forward STFT.
        length: if given, the reconstructed signal is sliced to exactly
            this many samples (starting after the `center` padding).
        window: window name (`"hann"`/`"hamming"` via
            `scipy.signal.get_window`), a rank-1 window tensor of length
            `sequence_length`, or `None` for a rectangular window.
        center: whether the forward STFT padded the signal on both sides
            by `fft_length // 2`; those samples are trimmed here.

    Returns:
        The reconstructed real signal.

    Raises:
        ValueError: if `x` has fewer than 2 dims or `window` has the
            wrong shape.
    """
    x = _get_complex_tensor_from_tuple(x)
    dtype = jnp.real(x).dtype

    if len(x.shape) < 2:
        raise ValueError(
            f"Input `x` must have at least 2 dimensions. "
            f"Received shape: {x.shape}"
        )

    # Full output length before any `center`/`length` trimming.
    expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
    # The window is centered inside the fft_length-sized frame.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad

    if window is not None:
        if isinstance(window, str):
            win = convert_to_tensor(
                scipy.signal.get_window(window, sequence_length), dtype=dtype
            )
        else:
            win = convert_to_tensor(window, dtype=dtype)
        if len(win.shape) != 1 or win.shape[-1] != sequence_length:
            raise ValueError(
                "The shape of `window` must be equal to [sequence_length]."
                f"Received: window shape={win.shape}"
            )
        win = jnp.pad(win, [[l_pad, r_pad]])
    else:
        win = jnp.ones((sequence_length + l_pad + r_pad), dtype=dtype)

    # jax.scipy.signal.istft returns (times, signal); [-1] keeps the signal.
    x = jax.scipy.signal.istft(
        x,
        fs=1.0,
        window=win,
        nperseg=(sequence_length + l_pad + r_pad),
        noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
        nfft=fft_length,
        boundary=False,
        time_axis=-2,
        freq_axis=-1,
    )[-1]

    # scale — undo jax's internal window normalization; NOTE(review): the
    # windowed branch divides by win.sum(), presumably matching the forward
    # stft's scaling — confirm against `stft` above.
    x = x / win.sum() if window is not None else x / sequence_stride

    # Trim the `center` padding and/or cut to the requested `length`.
    start = 0 if center is False else fft_length // 2
    if length is not None:
        end = start + length
    elif center is True:
        end = -(fft_length // 2)
    else:
        end = expected_output_len
    return x[..., start:end]
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def rsqrt(x):
    """Elementwise reciprocal square root: `1 / sqrt(x)`."""
    return jax.lax.rsqrt(x)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def erf(x):
    """Elementwise Gauss error function."""
    return jax.lax.erf(x)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def erfinv(x):
    """Elementwise inverse of the Gauss error function."""
    return jax.lax.erf_inv(x)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def solve(a, b):
    """Solve the linear system `a @ x = b` for `x`."""
    return jnp.linalg.solve(convert_to_tensor(a), convert_to_tensor(b))
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm of `x`, computed in a floating dtype.

    int64 inputs use the configured floatx; other dtypes are promoted
    with `float` through the backend's dtype promotion rules.
    """
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        compute_dtype = config.floatx()
    else:
        compute_dtype = dtypes.result_type(x.dtype, float)
    return jnp.linalg.norm(
        cast(x, compute_dtype), ord=ord, axis=axis, keepdims=keepdims
    )
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def logdet(x):
    """Natural log of the absolute value of the determinant of `x`.

    Computed through `slogdet`, which in JAX (like in NumPy) is more
    stable than `jnp.log(jnp.linalg.det(x))`. See
    https://numpy.org/doc/stable/reference/generated/numpy.linalg.slogdet.html
    """
    from keras.src.backend.jax.numpy import slogdet

    sign_and_logabsdet = slogdet(x)
    return sign_and_logabsdet[1]
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/nn.py
ADDED
|
@@ -0,0 +1,1197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import jax
|
| 5 |
+
import jax.experimental.sparse as jax_sparse
|
| 6 |
+
import jax.numpy as jnp
|
| 7 |
+
from jax import lax
|
| 8 |
+
from jax import nn as jnn
|
| 9 |
+
from jax.experimental.pallas.ops.tpu import (
|
| 10 |
+
flash_attention as flash_attention_tpu,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
from keras.src import backend
|
| 14 |
+
from keras.src.backend.common.backend_utils import (
|
| 15 |
+
compute_conv_transpose_padding_args_for_jax,
|
| 16 |
+
)
|
| 17 |
+
from keras.src.backend.jax.core import cast
|
| 18 |
+
from keras.src.backend.jax.core import convert_to_tensor
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def relu(x):
    """Rectified linear unit: elementwise `max(x, 0)`."""
    return jnn.relu(convert_to_tensor(x))
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def relu6(x):
    """ReLU clipped from above at 6: `min(max(x, 0), 6)`."""
    return jnn.relu6(convert_to_tensor(x))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def sigmoid(x):
    """Elementwise logistic sigmoid."""
    return jnn.sigmoid(convert_to_tensor(x))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def tanh(x):
    """Elementwise hyperbolic tangent."""
    return jnn.tanh(convert_to_tensor(x))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def tanh_shrink(x):
    """Tanh-shrink activation: `x - tanh(x)`."""
    t = convert_to_tensor(x)
    return t - jnp.tanh(t)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def softplus(x):
    """Softplus activation: `log(1 + exp(x))`."""
    return jnn.softplus(convert_to_tensor(x))
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def softsign(x):
    """Softsign activation, delegated to `jax.nn.soft_sign`."""
    return jnn.soft_sign(convert_to_tensor(x))
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def soft_shrink(x, threshold=0.5):
    """Soft shrinkage: shift values toward zero by `threshold`, mapping
    `|x| <= threshold` to 0."""
    x = convert_to_tensor(x)
    below = jnp.where(x < -threshold, x + threshold, 0.0)
    return jnp.where(x > threshold, x - threshold, below)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def sparse_plus(x):
    """Sparse-plus activation, delegated to `jax.nn.sparse_plus`."""
    return jnn.sparse_plus(convert_to_tensor(x))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def silu(x):
    """SiLU (swish) activation: `x * sigmoid(x)`."""
    return jnn.silu(convert_to_tensor(x))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def squareplus(x, b=4):
    """Squareplus activation with hyperparameter `b`."""
    return jnn.squareplus(convert_to_tensor(x), b=b)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def log_sigmoid(x):
    """Log of the logistic sigmoid, computed stably via `jax.nn`."""
    return jnn.log_sigmoid(convert_to_tensor(x))
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def leaky_relu(x, negative_slope=0.2):
    """ReLU with slope `negative_slope` for negative inputs."""
    return jnn.leaky_relu(convert_to_tensor(x), negative_slope=negative_slope)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def hard_sigmoid(x):
    """Piecewise-linear approximation of the sigmoid."""
    return jnn.hard_sigmoid(convert_to_tensor(x))
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def hard_silu(x):
    """Hard SiLU: `x * hard_sigmoid(x)`."""
    return jnn.hard_silu(convert_to_tensor(x))
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def elu(x, alpha=1.0):
    """Exponential linear unit with scale `alpha` on the negative part."""
    return jnn.elu(convert_to_tensor(x), alpha=alpha)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def selu(x):
    """Scaled exponential linear unit."""
    return jnn.selu(convert_to_tensor(x))
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def gelu(x, approximate=True):
    """Gaussian error linear unit; `approximate=True` uses the tanh form."""
    return jnn.gelu(convert_to_tensor(x), approximate)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def celu(x, alpha=1.0):
    """Continuously-differentiable exponential linear unit."""
    return jnn.celu(convert_to_tensor(x), alpha=alpha)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def glu(x, axis=-1):
    """Gated linear unit: split `x` in half along `axis`, gate with sigmoid."""
    return jnn.glu(convert_to_tensor(x), axis=axis)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def hard_tanh(x):
    """Tanh clipped to the linear segment on [-1, 1]."""
    return jnn.hard_tanh(convert_to_tensor(x))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def hard_shrink(x, threshold=0.5):
    """Hard shrinkage: keep values with `|x| > threshold`, zero the rest."""
    x = convert_to_tensor(x)
    keep = jnp.abs(x) > threshold
    return jnp.where(keep, x, 0.0)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def threshold(x, threshold, default_value):
    """Pass through values strictly above `threshold`; else `default_value`."""
    t = convert_to_tensor(x)
    return jnp.where(t > threshold, t, default_value)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def softmax(x, axis=-1):
    """Softmax normalization along `axis`."""
    return jnn.softmax(convert_to_tensor(x), axis=axis)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def log_softmax(x, axis=-1):
    """Log of the softmax along `axis`, computed stably."""
    return jnn.log_softmax(convert_to_tensor(x), axis=axis)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def sparsemax(logits, axis=-1):
    """Sparsemax activation: a sparse alternative to softmax that projects
    `logits` onto the probability simplex, producing exact zeros for
    low-scoring entries.

    Args:
        logits: input tensor.
        axis: axis along which to normalize.

    Returns:
        Tensor of the same shape as `logits`.
    """
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(logits)
    logits_sorted = -1.0 * jnp.sort(logits * -1.0, axis=axis)
    logits_cumsum = jnp.cumsum(logits_sorted, axis=axis)  # find cumulative sum
    r = jnp.arange(1, logits.shape[axis] + 1)  # Determine the sparsity
    r_shape = [1] * logits.ndim
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = r.reshape(r_shape)
    # A sorted position j is "in support" while its logit exceeds the
    # running threshold (cumsum - 1) / j.
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold
    k = jnp.sum(support, axis=axis, keepdims=True)
    logits_cumsum_safe = jnp.where(support, logits_cumsum, 0.0)
    # tau is the amount subtracted from every logit; only the k in-support
    # cumulative sums contribute.
    tau = (jnp.sum(logits_cumsum_safe, axis=axis, keepdims=True) - 1) / k
    output = jnp.maximum(logits - tau, 0.0)
    return output
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def _convert_to_spatial_operand(
|
| 169 |
+
x,
|
| 170 |
+
num_spatial_dims,
|
| 171 |
+
data_format="channels_last",
|
| 172 |
+
include_batch_and_channels=True,
|
| 173 |
+
):
|
| 174 |
+
# Helper function that converts an operand to a spatial operand.
|
| 175 |
+
x = (x,) * num_spatial_dims if isinstance(x, int) else x
|
| 176 |
+
if not include_batch_and_channels:
|
| 177 |
+
return x
|
| 178 |
+
if data_format == "channels_last":
|
| 179 |
+
x = (1,) + x + (1,)
|
| 180 |
+
else:
|
| 181 |
+
x = (1,) + (1,) + x
|
| 182 |
+
return x
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _pool(
|
| 186 |
+
inputs,
|
| 187 |
+
initial_value,
|
| 188 |
+
reduce_fn,
|
| 189 |
+
pool_size,
|
| 190 |
+
strides=None,
|
| 191 |
+
padding="valid",
|
| 192 |
+
):
|
| 193 |
+
"""Helper function to define pooling functions.
|
| 194 |
+
|
| 195 |
+
Args:
|
| 196 |
+
inputs: input data of shape `N+2`.
|
| 197 |
+
initial_value: the initial value for the reduction.
|
| 198 |
+
reduce_fn: a reduce function of the form `(T, T) -> T`.
|
| 199 |
+
pool_size: a sequence of `N` integers, representing the window size to
|
| 200 |
+
reduce over.
|
| 201 |
+
strides: a sequence of `N` integers, representing the inter-window
|
| 202 |
+
strides (default: `(1, ..., 1)`).
|
| 203 |
+
padding: either the string `same` or `valid`.
|
| 204 |
+
|
| 205 |
+
Returns:
|
| 206 |
+
The output of the reduction for each window slice.
|
| 207 |
+
"""
|
| 208 |
+
if padding not in ("same", "valid"):
|
| 209 |
+
raise ValueError(
|
| 210 |
+
f"Invalid padding '{padding}', must be 'same' or 'valid'."
|
| 211 |
+
)
|
| 212 |
+
padding = padding.upper()
|
| 213 |
+
return lax.reduce_window(
|
| 214 |
+
inputs,
|
| 215 |
+
initial_value,
|
| 216 |
+
reduce_fn,
|
| 217 |
+
pool_size,
|
| 218 |
+
strides,
|
| 219 |
+
padding,
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling over the spatial dims of a rank `N+2` input.

    Args:
        inputs: batched tensor with batch, spatial, and channel dims.
        pool_size: int or tuple of `N` ints, the pooling window size.
        strides: int or tuple of `N` ints; defaults to `pool_size`.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None
            (backend default).

    Returns:
        The max-pooled tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    # Resolve the default strides from the *raw* pool size before either is
    # expanded with batch/channel dims; the previous order expanded
    # `pool_size` first and then expanded it a second time as the stride,
    # yielding a tuple of the wrong rank for `lax.reduce_window`.
    if strides is None:
        strides = pool_size
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    strides = _convert_to_spatial_operand(
        strides, num_spatial_dims, data_format
    )
    return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def average_pool(
    inputs,
    pool_size,
    strides,
    padding,
    data_format=None,
):
    """Average pooling over the spatial dims of a rank `N+2` input.

    Args:
        inputs: batched tensor with batch, spatial, and channel dims.
        pool_size: int or tuple of `N` ints, the pooling window size.
        strides: int or tuple of `N` ints; `None` defaults to `pool_size`.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None
            (backend default).

    Returns:
        The average-pooled tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    # Resolve the default strides from the *raw* pool size before either is
    # expanded with batch/channel dims; expanding the already-expanded
    # `pool_size` a second time would yield a tuple of the wrong rank for
    # `lax.reduce_window`.
    if strides is None:
        strides = pool_size
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    strides = _convert_to_spatial_operand(
        strides, num_spatial_dims, data_format
    )

    pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
    if padding == "valid":
        # Avoid the extra reduce_window.
        return pooled / math.prod(pool_size)
    else:
        # Count the number of valid entries at each input point, then use that
        # for computing average. Assumes that any two arrays of same shape will
        # be padded the same. Avoid broadcasting on axis where pooling is
        # skipped.
        shape = [
            (a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
        ]
        window_counts = _pool(
            jnp.ones(shape, inputs.dtype),
            0.0,
            lax.add,
            pool_size,
            strides,
            padding,
        )
        return pooled / window_counts
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def _convert_to_lax_conv_dimension_numbers(
|
| 283 |
+
num_spatial_dims,
|
| 284 |
+
data_format="channels_last",
|
| 285 |
+
transpose=False,
|
| 286 |
+
):
|
| 287 |
+
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
|
| 288 |
+
num_dims = num_spatial_dims + 2
|
| 289 |
+
|
| 290 |
+
if data_format == "channels_last":
|
| 291 |
+
spatial_dims = tuple(range(1, num_dims - 1))
|
| 292 |
+
inputs_dn = (0, num_dims - 1) + spatial_dims
|
| 293 |
+
else:
|
| 294 |
+
spatial_dims = tuple(range(2, num_dims))
|
| 295 |
+
inputs_dn = (0, 1) + spatial_dims
|
| 296 |
+
|
| 297 |
+
if transpose:
|
| 298 |
+
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
|
| 299 |
+
else:
|
| 300 |
+
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
|
| 301 |
+
|
| 302 |
+
return lax.ConvDimensionNumbers(
|
| 303 |
+
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
|
| 304 |
+
)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """N-D (grouped) convolution via `jax.lax.conv_general_dilated`.

    Args:
        inputs: batched input of rank `N + 2`.
        kernel: kernel with in_channels on axis -2 and out_channels on
            axis -1.
        strides: int or tuple of `N` ints.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None
            (backend default).
        dilation_rate: int or tuple of `N` ints for kernel dilation.

    Returns:
        The convolution output tensor.

    Raises:
        ValueError: if the input channel count is not divisible by the
            kernel's in_channels.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    if data_format == "channels_last":
        channels = inputs.shape[-1]
    else:
        channels = inputs.shape[1]
    kernel_in_channels = kernel.shape[-2]
    if channels % kernel_in_channels > 0:
        raise ValueError(
            "The number of input channels must be evenly divisible by "
            f"kernel's in_channels. Received input channels {channels} and "
            f"kernel in_channels {kernel_in_channels}. "
        )
    # A ratio > 1 produces a grouped convolution.
    feature_group_count = channels // kernel_in_channels
    kernel = convert_to_tensor(kernel)
    # Cast inputs to the kernel dtype so lax sees matching element types.
    inputs = convert_to_tensor(inputs, dtype=kernel.dtype)
    return jax.lax.conv_general_dilated(
        inputs,
        kernel,
        strides,
        padding,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        feature_group_count=feature_group_count,
    )
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """N-D depthwise convolution: each input channel is convolved with its
    own set of filters.

    Args:
        inputs: batched input of rank `N + 2`.
        kernel: kernel whose last two axes are (in_channels,
            channel_multiplier).
        strides: int or tuple of `N` ints.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`, `"channels_first"`, or None
            (backend default).
        dilation_rate: int or tuple of `N` ints for kernel dilation.

    Returns:
        The depthwise-convolution output tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    # One group per input channel makes the convolution depthwise.
    feature_group_count = (
        inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
    )
    # Collapse the (in_channels, channel_multiplier) kernel layout into the
    # grouped-conv layout lax expects: (..., 1, in_channels * multiplier).
    kernel = jnp.reshape(
        kernel,
        kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
    )
    return jax.lax.conv_general_dilated(
        inputs,
        kernel,
        strides,
        padding,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        feature_group_count=feature_group_count,
    )
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable convolution: a depthwise conv followed by a 1x1
    pointwise conv that mixes channels."""
    data_format = backend.standardize_data_format(data_format)
    spatial_out = depthwise_conv(
        inputs,
        depthwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )
    # The pointwise stage always runs with stride 1 and valid padding; the
    # spatial geometry was already handled by the depthwise stage.
    return conv(
        spatial_out,
        pointwise_kernel,
        strides=1,
        padding="valid",
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """N-D transposed convolution via `jax.lax.conv_transpose`.

    Args:
        inputs: batched input of rank `N + 2`.
        kernel: convolution kernel; lax flips it (`transpose_kernel=True`).
        strides: int or tuple of `N` ints, the upsampling stride.
        padding: `"valid"` or `"same"`.
        output_padding: optional extra output padding, folded into the
            explicit per-dim padding below.
        data_format: `"channels_last"`, `"channels_first"`, or None
            (backend default).
        dilation_rate: int or tuple of `N` ints for kernel dilation.

    Returns:
        The transposed-convolution output tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    # Translate Keras-style padding/output_padding into the explicit
    # (low, high) padding pairs that lax.conv_transpose accepts.
    padding_values = compute_conv_transpose_padding_args_for_jax(
        input_shape=inputs.shape,
        kernel_shape=kernel.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )

    return jax.lax.conv_transpose(
        inputs,
        kernel,
        strides,
        padding=padding_values,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        transpose_kernel=True,
    )
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def one_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """One-hot encode integer tensor `x` into `num_classes` classes.

    Negative entries in `x` yield an all-zero slice along `axis`. When
    `sparse=True`, the result is a `jax.experimental.sparse.BCOO` tensor.
    """
    x = convert_to_tensor(x)
    if sparse:
        if axis < 0:
            # Normalize the axis against the *output* rank (input rank + 1).
            axis = axis + len(x.shape) + 1
        if dtype is None:
            dtype = "float32"
        # We deal with negative inputs by having zeros in the output although
        # it's useless. It makes shapes static.
        values = jnp.greater_equal(jnp.ravel(x), 0).astype(dtype)
        values_count = values.shape[0]
        # Build one index column per input dim, plus the class column.
        indices = [jnp.arange(dim) for dim in x.shape]
        indices = jnp.meshgrid(*indices, indexing="ij")
        indices.insert(axis, jnp.maximum(x, 0))  # Deal with negative indices
        indices = [a.reshape(values_count, 1).astype("int32") for a in indices]
        indices = jnp.concatenate(indices, axis=1)
        shape = list(x.shape)
        shape.insert(axis, num_classes)
        shape = tuple(shape)
        return jax_sparse.BCOO(
            (values, indices),
            shape=shape,
            indices_sorted=True,
            unique_indices=True,
        )
    return jnn.one_hot(x, num_classes, axis=axis, dtype=dtype)
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def multi_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """Multi-hot encode `x`: one-hot followed by a max over the index axis,
    so each sample marks every class it contains.

    Args:
        x: integer tensor of class indices.
        num_classes: size of the class axis in the output.
        axis: position of the class axis.
        dtype: output dtype.
        sparse: if True, return a `jax.experimental.sparse.BCOO` tensor.
    """
    x = convert_to_tensor(x)
    # Reduce over the per-sample index axis (axis 1 for batched input).
    reduction_axis = 1 if len(x.shape) > 1 else 0
    if sparse:
        result = one_hot(
            x, num_classes, axis=axis, dtype="int32", sparse=sparse
        )
        # JAX's BCOO does not support max reduction, use sum and compare with 0.
        result = jax_sparse.bcoo_reduce_sum(result, axes=(reduction_axis,))
        result = jax_sparse.bcoo_sum_duplicates(result)
        values = jnp.greater_equal(result.data, 0).astype(dtype)
        return jax_sparse.BCOO(
            (values, result.indices),
            shape=result.shape,
            indices_sorted=True,
            unique_indices=True,
        )
    return jnp.max(
        one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
        axis=reduction_axis,
    )
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy between dense `target` probabilities and
    `output` (logits or probabilities), reduced along `axis`.

    Raises:
        ValueError: if shapes differ or the inputs are rank 0.
    """
    target = jnp.array(target)
    output = jnp.array(output)

    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )

    if from_logits:
        log_probs = jax.nn.log_softmax(output, axis=axis)
        return -jnp.sum(target * log_probs, axis=axis)

    # Probabilities: renormalize, clip away exact 0/1, then take the log.
    probs = output / jnp.sum(output, axis, keepdims=True)
    probs = jnp.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())
    return -jnp.sum(target * jnp.log(probs), axis=axis)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Crossentropy with integer class indices in `target` and dense
    `output`.

    Args:
        target: integer class indices; its shape must equal `output`'s
            shape minus the class axis (an explicit trailing size-1 axis
            is squeezed first).
        output: logits (`from_logits=True`) or probabilities.
        from_logits: whether `output` holds unnormalized logits.
        axis: the class axis of `output`.

    Returns:
        Per-sample crossentropy, reduced along `axis`.

    Raises:
        ValueError: on rank-0 `output` or mismatched shapes.
    """
    target = jnp.array(target, dtype="int32")
    output = jnp.array(output)
    # Accept targets carrying an explicit trailing class-index axis of size 1.
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = jnp.squeeze(target, axis=-1)

    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    if target.shape != output.shape[:-1]:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape "
            "up until the last dimension: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = jax.nn.log_softmax(output, axis=axis)
    else:
        # Probabilities: renormalize, clip away exact 0/1, then log.
        output = output / jnp.sum(output, axis, keepdims=True)
        output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
        log_prob = jnp.log(output)
    # Reduce against the one-hot expansion of the integer targets.
    target = jnn.one_hot(target, output.shape[axis], axis=axis)
    return -jnp.sum(target * log_prob, axis=axis)
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
def binary_crossentropy(target, output, from_logits=False):
    """Elementwise binary crossentropy between `target` in [0, 1] and
    `output` (logits or probabilities).

    Raises:
        ValueError: if the shapes differ.
    """
    target = jnp.array(target)
    output = jnp.array(output)

    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )

    if from_logits:
        # log_sigmoid is the numerically stable route for logits.
        log_p = jax.nn.log_sigmoid(output)
        log_not_p = jax.nn.log_sigmoid(-output)
        return -1.0 * target * log_p - (1.0 - target) * log_not_p

    # Probabilities: clip away exact 0/1 before taking logs.
    output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
    bce = target * jnp.log(output)
    bce += (1.0 - target) * jnp.log(1.0 - output)
    return -bce
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def moments(x, axes, keepdims=False, synchronized=False):
    """Return the mean and variance of `x` over the given axes.

    Args:
        x: Input tensor.
        axes: Axis or axes over which to compute the statistics.
        keepdims: Whether the reduced axes are kept with size 1.
        synchronized: Unsupported on JAX; must be False.

    Returns:
        Tuple `(mean, variance)` with the same dtype as `x`.

    Raises:
        NotImplementedError: If `synchronized=True`.
    """
    if synchronized:
        raise NotImplementedError(
            "Argument synchronized=True is not supported with JAX."
        )
    # float16/bfloat16 have too little dynamic range for these statistics,
    # so compute in float32 and cast back at the end.
    ori_dtype = backend.standardize_dtype(x.dtype)
    need_cast = ori_dtype in ("float16", "bfloat16")
    if need_cast:
        x = cast(x, "float32")

    mean = jnp.mean(x, axes, keepdims=True)
    variance = jnp.var(x, axis=axes, keepdims=True)
    if not keepdims:
        mean = jnp.squeeze(mean, axes)
        variance = jnp.squeeze(variance, axes)

    if need_cast:
        # Clamp into the float16 range to avoid overflow/underflow when
        # casting the float32 statistics back down.
        f16 = jnp.finfo(jnp.float16)
        mean = jnp.clip(mean, f16.min, f16.max)
        variance = jnp.clip(variance, f16.min, f16.max)
        mean = cast(mean, ori_dtype)
        variance = cast(variance, ori_dtype)
    return mean, variance
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Apply batch normalization: `(x - mean) / sqrt(variance + epsilon)`,
    optionally scaled and shifted.

    Args:
        x: Input tensor.
        mean: 1D per-channel means, broadcast against `x` along `axis`.
        variance: 1D per-channel variances, same length as `mean`.
        axis: Axis of `x` that the statistics correspond to.
        offset: Optional 1D shift (beta) applied after normalization.
        scale: Optional 1D scale (gamma) applied to the normalized values.
        epsilon: Small constant added to `variance` for numerical stability.

    Returns:
        The normalized tensor, same shape as `x`.
    """
    # Reshape the 1D statistics so they broadcast against `x` along `axis`.
    broadcast_shape = [1] * len(x.shape)
    broadcast_shape[axis] = mean.shape[0]
    mean = jnp.reshape(mean, broadcast_shape)
    variance = jnp.reshape(variance, broadcast_shape)

    # Fold `scale` into the reciprocal stddev so the result is a single
    # fused multiply-add: y = x * inv + (offset - mean * inv).
    inv = jax.lax.rsqrt(variance + epsilon)
    if scale is not None:
        inv = inv * jnp.reshape(scale, broadcast_shape)
    shift = -mean * inv
    if offset is not None:
        shift = shift + jnp.reshape(offset, broadcast_shape)
    return jnp.add(x * inv, shift)
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """Per-sequence CTC loss computed with a forward-probability scan.

    Ported from optax's `ctc_loss_with_forward_probs` (see Ref below).

    Args:
        target: Integer label tensor of shape `(batch, max_label_length)`.
        output: Logits tensor of shape
            `(batch, max_input_length, num_classes)`.
        target_length: Integer tensor of valid label lengths per batch entry.
        output_length: Integer tensor of valid logit lengths per batch entry.
        mask_index: Class index used as the CTC blank symbol.

    Returns:
        Tensor of shape `(batch,)` holding the negative log-likelihood of
        each sequence.
    """
    # Ref: https://github.com/google-deepmind/optax
    # optax.ctc_loss_with_forward_probs
    target = convert_to_tensor(target, dtype="int32")
    output = convert_to_tensor(output)
    target_length = convert_to_tensor(target_length, "int32")
    output_length = convert_to_tensor(output_length, "int32")
    batch_size, max_input_length, num_classes = output.shape
    batch_size, max_label_length = target.shape
    # Stand-in for log(0); finite so arithmetic stays NaN-free.
    log_epsilon = -1e5

    # Ensure that the dtype promotion behavior matches that of `tf.nn.ctc_loss`
    dtype = backend.result_type(output.dtype, "float32")
    output = cast(output, dtype)

    def _lengths_to_paddings(lengths, max_length):
        # True where a timestep/label position is beyond the valid length.
        indices = jnp.arange(max_length).reshape(
            (1,) * lengths.ndim + (max_length,)
        )
        lengths = jnp.expand_dims(lengths, axis=-1)
        elem_valid = indices < lengths
        return jnp.logical_not(elem_valid)

    target_paddings = _lengths_to_paddings(target_length, max_label_length)
    output_paddings = _lengths_to_paddings(output_length, max_input_length)
    target_paddings = target_paddings.astype(output.dtype)
    output_paddings = output_paddings.astype(output.dtype)

    logprobs = jnn.log_softmax(output)
    label_lengths = max_label_length - jnp.sum(target_paddings, axis=1).astype(
        jnp.int32
    )

    # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
    repeat = (target[:, :-1] == target[:, 1:]).astype(jnp.float32)
    repeat = jnp.pad(repeat, ((0, 0), (0, 1)))

    # Log-probability of emitting the blank symbol at each timestep.
    logprobs_phi = logprobs[:, :, mask_index : mask_index + 1]  # [B, T, 1]
    logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2))  # [T, B, 1]

    # Log-probability of emitting each label position's class per timestep.
    _one_hot = jax.nn.one_hot(target, num_classes=num_classes)  # [B, N, K]
    logprobs_emit = jnp.einsum("btk,bnk->btn", logprobs, _one_hot)
    logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2))  # [T, B, N]

    # Forward variables: alpha_phi tracks "last symbol was blank" states,
    # alpha_emit tracks "last symbol was a label" states.
    # [B, N]
    logalpha_phi_init = (
        jnp.ones((batch_size, max_label_length + 1), dtype=output.dtype)
        * log_epsilon
    )
    logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0)
    logalpha_emit_init = (
        jnp.ones((batch_size, max_label_length), dtype=output.dtype)
        * log_epsilon
    )

    def update_phi_score(phi, added_score):
        # Update `phi[:, 1:]`` with adding `added_score` in log space.
        return jnp.concatenate(
            [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1
        )

    def loop_body(prev, x):
        # One timestep of the CTC forward recursion.
        prev_phi, prev_emit = prev
        # emit-to-phi epsilon transition, except if the next label is repetition
        prev_phi_orig = prev_phi
        prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat)

        logprob_emit, logprob_phi, pad = x

        # phi-to-emit transition
        next_emit = jnp.logaddexp(
            prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit
        )
        # self-loop transition
        next_phi = prev_phi + logprob_phi
        # emit-to-phi blank transition only when the next label is repetition
        next_phi = update_phi_score(
            next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)
        )

        # Padded timesteps carry the previous state through unchanged.
        pad = pad.reshape((batch_size, 1))
        next_emit = pad * prev_emit + (1.0 - pad) * next_emit
        next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi

        return (next_phi, next_emit), (next_phi, next_emit)

    xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0)))
    _, (logalpha_phi, logalpha_emit) = jax.lax.scan(
        loop_body, (logalpha_phi_init, logalpha_emit_init), xs
    )

    # last row needs to be updated with the last epsilon transition
    logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1])
    logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last)

    # extract per_seq_loss
    # [B, N+1]
    _one_hot = jax.nn.one_hot(label_lengths, num_classes=max_label_length + 1)
    # Select, per batch entry, the final forward score at its true label
    # length; the loss is the negated log-likelihood.
    per_seq_loss = -jnp.einsum("bn,bn->b", logalpha_phi_last, _one_hot)
    return per_seq_loss
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
def _ctc_greedy_decode(
    inputs,
    sequence_lengths,
    merge_repeated=True,
    mask_index=None,
):
    """Greedy (best-path) CTC decoding.

    Args:
        inputs: Tensor of shape `(batch, max_length, num_classes)`.
        sequence_lengths: Integer tensor of valid lengths per batch entry.
        merge_repeated: Whether consecutive identical classes collapse into
            one emission.
        mask_index: Blank class index; defaults to `num_classes - 1`.

    Returns:
        Tuple `(indices, scores)`: `indices` has shape
        `(1, batch, max_length)` with decoded classes left-compacted and
        blanks replaced by -1; `scores` has shape `(batch, 1)` and is the
        negated sum of the per-step maxima.
    """
    inputs = convert_to_tensor(inputs)
    sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32")
    batch_size, max_length, num_classes = inputs.shape

    if mask_index is None:
        mask_index = num_classes - 1

    # Best class and its score at every timestep.
    indices = jnp.argmax(inputs, axis=-1)
    scores = jnp.max(inputs, axis=-1)

    # Timesteps beyond each sequence's valid length.
    seqlen_mask = jnp.arange(max_length)[None, :]
    seqlen_mask = seqlen_mask >= sequence_lengths[:, None]

    # Out-of-range steps contribute the blank class and a zero score.
    indices = jnp.where(seqlen_mask, mask_index, indices)
    scores = jnp.where(seqlen_mask, 0.0, scores)

    if merge_repeated:
        # Collapse runs: every repeat of the previous class becomes blank.
        repeat_mask = indices[:, 1:] == indices[:, :-1]
        repeat_mask = jnp.pad(repeat_mask, ((0, 0), (1, 0)))
        indices = jnp.where(repeat_mask, mask_index, indices)

    # We set to -1 for blank labels
    invalid_mask = indices == mask_index
    indices = jnp.where(invalid_mask, -1, indices)

    # We rearrange the indices by moving `mask_index` to the end of the array
    # (stable argsort on positions, with blanks pushed past `max_length`).
    order = jnp.expand_dims(jnp.arange(max_length), axis=0)  # [1, N]
    order = jnp.tile(order, (batch_size, 1))  # [B, N]
    order = jnp.where(invalid_mask, max_length, order)
    order = jnp.argsort(order, axis=-1)
    indices = jnp.take_along_axis(indices, order, axis=-1)

    scores = -jnp.sum(scores, axis=1)[:, None]
    # Leading axis of size 1 matches the (top_paths, batch, length) layout
    # of the beam-search decoder.
    indices = jnp.expand_dims(indices, axis=0)
    return indices, scores
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
def _ctc_beam_search_decode(
    inputs,
    sequence_lengths,
    beam_width=100,
    top_paths=1,
    mask_index=None,
):
    """Beam-search CTC decoding.

    Args:
        inputs: Tensor of shape `(batch, max_seq_len, num_classes)`;
            log-softmax is applied internally.
        sequence_lengths: Integer tensor of valid lengths per batch entry.
        beam_width: Number of beams kept after each pruning step.
        top_paths: Number of best paths returned per batch entry.
        mask_index: Blank class index; defaults to `num_classes - 1`.

    Returns:
        Tuple `(paths, scores)`: `paths` has shape
        `(top_paths, batch, max_seq_len)` padded with -1; `scores` has
        shape `(batch, top_paths)`.
    """
    inputs = convert_to_tensor(inputs)
    sequence_lengths = convert_to_tensor(sequence_lengths)

    batch_size, max_seq_len, num_classes = inputs.shape
    inputs = jnn.log_softmax(inputs)
    seqlen_mask = jnp.arange(max_seq_len)[None, :] >= sequence_lengths[:, None]

    if mask_index is None:
        mask_index = num_classes - 1

    # This is a workaround for the fact that jnp.argsort does not support
    # the order parameter which is used to break ties when scores are equal.
    # For compatibility with the tensorflow implementation, we flip the inputs
    # and the mask_index, and then flip the classes back to the correct indices
    inputs = jnp.flip(inputs, axis=2)
    mask_index = num_classes - mask_index - 1

    # Sentinel used both for path padding and for the blank class.
    _pad = -1

    # 2 * beam_width slots: each surviving beam is tracked twice, once for
    # its "ended in a label" variant and once for its "ended in blank" one.
    init_paths = jnp.full(
        (batch_size, 2 * beam_width, max_seq_len), _pad, dtype=jnp.int32
    )

    # Seed the beams from the top classes of the first timestep.
    num_init_paths = builtins.min(num_classes, beam_width)
    max_classes = jnp.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:]
    init_classes = jnp.where(max_classes == mask_index, _pad, max_classes)
    init_paths = init_paths.at[:, :num_init_paths, 0].set(init_classes)

    init_scores = (
        jnp.full((batch_size, 2 * beam_width), -jnp.inf, dtype=inputs.dtype)
        .at[:, :num_init_paths]
        .set(jnp.take_along_axis(inputs[:, 0], max_classes, axis=1))
    )
    init_masked = init_paths[:, :, 0] == _pad

    def _extend_paths(paths, scores, masked, x):
        # Expand every beam by every class; `x` is one timestep's log-probs.
        paths = jnp.repeat(paths, num_classes, axis=0)
        scores = jnp.repeat(scores, num_classes)
        masked = jnp.repeat(masked, num_classes)

        # Position of the first pad slot, i.e. where the next class goes.
        path_tail_index = jnp.argmax(paths == _pad, axis=1)
        paths_arange = jnp.arange(2 * beam_width * num_classes)
        path_tails = paths[paths_arange, path_tail_index - 1]
        path_tails = jnp.where(path_tail_index == 0, _pad, path_tails)

        classes = jnp.arange(num_classes).at[mask_index].set(_pad)
        classes = jnp.tile(classes, 2 * beam_width)

        prev_masked = masked
        masked = classes == _pad

        # CTC rule: a label repeated without an intervening blank does not
        # extend the path.
        masked_repeat = ~prev_masked & (path_tails == classes)
        classes = jnp.where(masked_repeat, _pad, classes)
        paths = paths.at[paths_arange, path_tail_index].set(classes)

        x = jnp.tile(x, 2 * beam_width)
        scores = scores + x

        return paths, scores, masked

    def _merge_scores(unique_inverse, scores):
        # Log-sum-exp accumulation of scores that map to the same path.
        scores_max = jnp.max(scores)
        scores_exp = jnp.exp(scores - scores_max)
        scores = jnp.zeros_like(scores).at[unique_inverse].add(scores_exp)
        scores = jnp.log(scores) + scores_max
        return scores

    def _prune_paths(paths, scores, masked):
        # Deduplicate expanded paths (fixed `size` keeps shapes static for
        # jit/scan), merging scores of identical paths.
        paths, unique_inverse = jnp.unique(
            paths,
            return_inverse=True,
            size=2 * num_classes * beam_width,
            axis=0,
            fill_value=_pad,
        )
        if len(unique_inverse.shape) >= 2:
            unique_inverse = jnp.squeeze(unique_inverse, axis=1)

        # Track "ended in label" and "ended in blank" mass separately.
        emit_scores = jnp.where(masked, -jnp.inf, scores)
        mask_scores = jnp.where(masked, scores, -jnp.inf)

        emit_scores = _merge_scores(unique_inverse, emit_scores)
        mask_scores = _merge_scores(unique_inverse, mask_scores)

        total_scores = jnp.logaddexp(emit_scores, mask_scores)
        top_indices = jnp.argsort(total_scores)[-beam_width:]

        paths = paths[top_indices]
        emit_scores = emit_scores[top_indices]
        mask_scores = mask_scores[top_indices]

        # Re-split each surviving beam into its two variants.
        paths = jnp.tile(paths, (2, 1))
        scores = jnp.concatenate([emit_scores, mask_scores])
        masked = jnp.concatenate(
            [jnp.zeros(beam_width, bool), jnp.ones(beam_width, bool)]
        )

        return paths, scores, masked

    def _decode_step(paths, scores, masked, x):
        paths, scores, masked = _extend_paths(paths, scores, masked, x)
        paths, scores, masked = _prune_paths(paths, scores, masked)
        return paths, scores, masked

    def _step(prev, x):
        paths, scores, masked = prev
        x, seqlen_mask = x

        # Skip the step entirely for timesteps past the sequence length.
        paths, scores, masked = lax.cond(
            seqlen_mask,
            lambda paths, scores, masked, x: (paths, scores, masked),
            _decode_step,
            paths,
            scores,
            masked,
            x,
        )

        return (paths, scores, masked), None

    def _decode_batch(
        init_paths, init_scores, init_masked, inputs, seqlen_mask
    ):
        # Scan over timesteps 1..T-1 (timestep 0 seeded the beams).
        (paths, scores, masked), _ = lax.scan(
            _step,
            (init_paths, init_scores, init_masked),
            (inputs[1:], seqlen_mask[1:]),
        )

        # Final merge of the label/blank variants of each path.
        paths, unique_inverse = jnp.unique(
            paths,
            return_inverse=True,
            size=2 * num_classes * beam_width,
            axis=0,
            fill_value=_pad,
        )
        if len(unique_inverse.shape) >= 2:
            unique_inverse = jnp.squeeze(unique_inverse, axis=1)
        scores = _merge_scores(unique_inverse, scores)

        top_indices = jnp.argsort(scores)[-top_paths:][::-1]
        paths = paths[top_indices]
        scores = scores[top_indices]

        return paths, scores

    paths, scores = jax.vmap(_decode_batch)(
        init_paths, init_scores, init_masked, inputs, seqlen_mask
    )

    # convert classes back to the correct indices
    paths = jnp.where(paths == _pad, _pad, num_classes - paths - 1)
    paths = jnp.transpose(paths, [1, 0, 2])
    return paths, scores
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Decode CTC output with either greedy or beam-search decoding.

    Args:
        inputs: Tensor of shape `(batch, max_length, num_classes)`.
        sequence_lengths: Integer tensor of valid lengths per batch entry.
        strategy: Either `"greedy"` or `"beam_search"`.
        beam_width: Beam size (beam-search only).
        top_paths: Number of paths returned (beam-search only).
        merge_repeated: Collapse repeated classes (greedy only).
        mask_index: Blank class index.

    Returns:
        Tuple `(paths, scores)` as produced by the selected decoder.

    Raises:
        ValueError: If `strategy` is not one of the supported values.
    """
    inputs = convert_to_tensor(inputs)
    # Promote to at least float32 before decoding.
    inputs = cast(inputs, backend.result_type(inputs.dtype, "float32"))

    if strategy == "greedy":
        return _ctc_greedy_decode(
            inputs,
            sequence_lengths,
            merge_repeated=merge_repeated,
            mask_index=mask_index,
        )
    if strategy == "beam_search":
        return _ctc_beam_search_decode(
            inputs,
            sequence_lengths,
            beam_width=beam_width,
            top_paths=top_paths,
            mask_index=mask_index,
        )
    raise ValueError(
        f"Invalid strategy {strategy}. Supported values are "
        "'greedy' and 'beam_search'."
    )
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
def psnr(x1, x2, max_val):
    """Peak signal-to-noise ratio between two tensors, in decibels.

    Args:
        x1: First tensor.
        x2: Second tensor; must have the same shape as `x1`.
        max_val: Maximum possible signal value (e.g. 255 or 1.0).

    Returns:
        Scalar PSNR value.

    Raises:
        ValueError: If the input shapes differ.
    """
    if x1.shape != x2.shape:
        raise ValueError(
            f"Input shapes {x1.shape} and {x2.shape} must "
            "match for PSNR calculation. "
        )

    max_val = convert_to_tensor(max_val, dtype=x2.dtype)
    mean_squared_error = jnp.mean(jnp.square(x1 - x2))
    # PSNR = 10 * log10(max_val**2 / MSE), written as a difference of logs.
    return 20 * jnp.log10(max_val) - 10 * jnp.log10(mean_squared_error)
|
| 1015 |
+
|
| 1016 |
+
|
| 1017 |
+
def _can_use_flash_attention(query, key, value, bias, raise_error=False):
|
| 1018 |
+
"""Verify the availability of flash attention."""
|
| 1019 |
+
try:
|
| 1020 |
+
from jax._src.cudnn.fused_attention_stablehlo import _normalize_layout
|
| 1021 |
+
from jax._src.cudnn.fused_attention_stablehlo import (
|
| 1022 |
+
check_compute_capability,
|
| 1023 |
+
)
|
| 1024 |
+
from jax._src.cudnn.fused_attention_stablehlo import check_cudnn_version
|
| 1025 |
+
from jax._src.cudnn.fused_attention_stablehlo import (
|
| 1026 |
+
check_is_flash_attention,
|
| 1027 |
+
)
|
| 1028 |
+
from jax._src.cudnn.fused_attention_stablehlo import check_layout
|
| 1029 |
+
from jax.nn import dot_product_attention as dot_product_attention
|
| 1030 |
+
except ImportError:
|
| 1031 |
+
if raise_error:
|
| 1032 |
+
raise ImportError(
|
| 1033 |
+
"Flash attention is not supported in your current JAX version. "
|
| 1034 |
+
"Please update it by following the official guide: "
|
| 1035 |
+
"https://jax.readthedocs.io/en/latest/installation.html"
|
| 1036 |
+
)
|
| 1037 |
+
return False
|
| 1038 |
+
|
| 1039 |
+
try:
|
| 1040 |
+
# Check if cuDNN is installed and raise RuntimeError if cuDNN is not
|
| 1041 |
+
# detected
|
| 1042 |
+
cudnn_version = check_cudnn_version()
|
| 1043 |
+
# Only support at least Ampere
|
| 1044 |
+
if not check_compute_capability("8.0"):
|
| 1045 |
+
raise RuntimeError("Require at least Ampere arch to run")
|
| 1046 |
+
# Check inputs layout
|
| 1047 |
+
check_layout(
|
| 1048 |
+
query,
|
| 1049 |
+
key,
|
| 1050 |
+
value,
|
| 1051 |
+
bias,
|
| 1052 |
+
q_seqlen=None,
|
| 1053 |
+
kv_seqlen=None,
|
| 1054 |
+
layout=_normalize_layout("BTNH"),
|
| 1055 |
+
)
|
| 1056 |
+
check_is_flash_attention(
|
| 1057 |
+
query,
|
| 1058 |
+
key,
|
| 1059 |
+
_normalize_layout("BTNH"),
|
| 1060 |
+
cudnn_version,
|
| 1061 |
+
bias is not None,
|
| 1062 |
+
is_training=False,
|
| 1063 |
+
)
|
| 1064 |
+
return True
|
| 1065 |
+
except:
|
| 1066 |
+
if raise_error:
|
| 1067 |
+
raise
|
| 1068 |
+
return False
|
| 1069 |
+
|
| 1070 |
+
|
| 1071 |
+
def _apply_masks(logits, mask, is_causal):
|
| 1072 |
+
if mask is None and not is_causal:
|
| 1073 |
+
return logits
|
| 1074 |
+
|
| 1075 |
+
combined_mask = jnp.ones_like(logits, dtype="bool")
|
| 1076 |
+
if mask is not None:
|
| 1077 |
+
combined_mask = jnp.logical_and(combined_mask, mask)
|
| 1078 |
+
|
| 1079 |
+
if is_causal:
|
| 1080 |
+
T, S = logits.shape[2], logits.shape[3]
|
| 1081 |
+
mask = jnp.tril(jnp.ones((T, S), dtype="bool"))
|
| 1082 |
+
mask = mask[None, None, :, :]
|
| 1083 |
+
combined_mask = jnp.logical_and(combined_mask, mask)
|
| 1084 |
+
|
| 1085 |
+
large_negative_number = jnp.asarray(
|
| 1086 |
+
-0.7 * jnp.finfo(logits.dtype).max, dtype=logits.dtype
|
| 1087 |
+
)
|
| 1088 |
+
padded_logits = jnp.where(combined_mask, logits, large_negative_number)
|
| 1089 |
+
return padded_logits
|
| 1090 |
+
|
| 1091 |
+
|
| 1092 |
+
def _dot_product_attention_core(
    query, key, value, bias, mask, is_causal, scale
):
    """XLA fallback attention kernel: scaled QK^T, bias, masking, softmax,
    then the weighted sum over values.

    Operates on `BTNH`-laid-out tensors (batch, time, heads, head_dim);
    scores are shaped `BNTS`.
    """
    # Accumulate scores in at least float32 for numerical stability.
    score_dtype = jnp.promote_types(query.dtype, jnp.float32)
    scores = jnp.einsum(
        "BTNH,BSNH->BNTS", query, key, preferred_element_type=score_dtype
    )
    scores = scores * jnp.array(scale, dtype=scores.dtype)

    if bias is not None:
        scores = (scores + bias).astype(scores.dtype)

    masked_scores = _apply_masks(scores, mask, is_causal)

    # Softmax is always carried out in fp32, then cast back for the matmul.
    weights = jax.nn.softmax(masked_scores.astype(jnp.float32), axis=-1)
    weights = weights.astype(key.dtype)
    return jnp.einsum("BNTS,BSNH->BTNH", weights, value)
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
def dot_product_attention(
    query,
    key,
    value,
    bias=None,
    mask=None,
    scale=None,
    is_causal=False,
    flash_attention=None,
):
    """Scaled dot-product attention with automatic backend selection.

    Dispatch order: TPU Pallas flash attention, then `jax.nn.dot_product_attention`
    (jax>=0.4.31, cudnn or xla implementation), then a pure-jnp fallback
    that supports grouped-query attention (N query heads over K kv heads).

    Args:
        query: Tensor of shape `(B, T, N, H)`.
        key: Tensor of shape `(B, S, K, H)`.
        value: Tensor of the same shape as `key`.
        bias: Optional additive logits bias.
        mask: Optional boolean attention mask (TPU path treats it as
            segment ids).
        scale: Logit scale; defaults to `1/sqrt(H)` in the fallback path.
        is_causal: Whether to apply a causal mask.
        flash_attention: Force (True), forbid (False), or auto-detect (None)
            flash attention.

    Returns:
        Attention output with the same shape as `query`.

    Raises:
        ValueError: If any input is not 4D.
        RuntimeError: If `flash_attention` is requested but unavailable.
    """
    query = convert_to_tensor(query)
    key = convert_to_tensor(key)
    value = convert_to_tensor(value)
    if len(query.shape) != 4 or len(key.shape) != 4 or len(value.shape) != 4:
        raise ValueError(
            "`dot_product_attention` only supports 4D inputs. "
            f"Received: query.shape={query.shape}, key.shape={key.shape}, "
            f"value.shape={value.shape}."
        )
    if flash_attention is None:
        flash_attention = _can_use_flash_attention(query, key, value, bias)
    elif flash_attention is True:
        # Use `raise_error=True` to provide more details if the inputs failed to
        # use flash attention
        _can_use_flash_attention(query, key, value, bias, raise_error=True)
    if jax.devices()[0].platform == "tpu" and flash_attention:
        # Use TPU-optimized flash attention from Pallas
        return flash_attention_tpu(
            query,
            key,
            value,
            ab=bias,
            segment_ids=mask,
            causal=is_causal,
            sm_scale=scale,
        )
    # `dot_product_attention` is only available in jax>=0.4.31
    if hasattr(jax.nn, "dot_product_attention"):
        return jax.nn.dot_product_attention(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            is_causal=is_causal,
            implementation="cudnn" if flash_attention else "xla",
        )

    if flash_attention:
        raise RuntimeError(
            "Flash attention is not supported in your current JAX version. "
            "Please update it by following the official guide: "
            "https://jax.readthedocs.io/en/latest/installation.html"
        )
    # Ref: jax.nn.dot_product_attention
    # https://github.com/jax-ml/jax/blob/jax-v0.4.33/jax/_src/nn/functions.py#L886
    # Not support `query_seq_lengths` and `key_value_seq_lengths` args
    output_shape = query.shape
    _, _, K, H = key.shape
    scale = (1.0 / jnp.sqrt(H)) if scale is None else scale

    # _dot_product_attention_xla
    # Grouped-query attention: split N query heads into K groups of G.
    B, T, N, H = query.shape
    G = N // K
    query = jnp.reshape(query, (B, T, K, G, H))

    def _reshape_to_grouped(t):
        # Reshape/broadcast bias or mask to the grouped-head layout.
        if t is not None:
            tB, tN, tT, tS = t.shape
            if tN == 1:
                # Head-shared tensor: broadcast across the G group axis.
                t = jnp.broadcast_to(t[:, :, None, :, :], (tB, tN, G, tT, tS))
            else:
                assert tN == N
                t = jnp.reshape(t, (tB, K, G, tT, tS))
        return t

    bias = _reshape_to_grouped(bias)
    mask = _reshape_to_grouped(mask)
    # vmap over the group axis so the core kernel only sees K heads.
    vmapped_fn = jax.vmap(
        _dot_product_attention_core,
        in_axes=(3, None, None, 2, 2, None, None),
        out_axes=3,
    )
    encoded = vmapped_fn(query, key, value, bias, mask, is_causal, scale)
    return jnp.reshape(encoded, output_shape)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/numpy.py
ADDED
|
@@ -0,0 +1,1277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import jax.experimental.sparse as jax_sparse
|
| 5 |
+
import jax.numpy as jnp
|
| 6 |
+
|
| 7 |
+
from keras.src.backend import config
|
| 8 |
+
from keras.src.backend.common import dtypes
|
| 9 |
+
from keras.src.backend.common.backend_utils import canonicalize_axis
|
| 10 |
+
from keras.src.backend.common.backend_utils import to_tuple_or_list
|
| 11 |
+
from keras.src.backend.common.variables import standardize_dtype
|
| 12 |
+
from keras.src.backend.jax import nn
|
| 13 |
+
from keras.src.backend.jax import sparse
|
| 14 |
+
from keras.src.backend.jax.core import cast
|
| 15 |
+
from keras.src.backend.jax.core import convert_to_tensor
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def add(x1, x2):
    """Element-wise sum of `x1` and `x2` (sparse-aware via the decorator)."""
    return jnp.add(convert_to_tensor(x1), convert_to_tensor(x2))
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def bincount(x, weights=None, minlength=0, sparse=False):
    """Count occurrences of each value in the non-negative integer tensor `x`.

    Args:
        x: integer tensor; a dense 2-D input is counted row by row.
        weights: optional tensor aligned with `x`; when given, each bin
            accumulates the corresponding weight instead of 1. For a BCOO
            `x`, `weights` must be a BCOO with the same indices.
        minlength: minimum number of bins in the output.
        sparse: when True (or when `x` is a `jax_sparse.BCOO`), the counts
            are computed via a sparse one-hot encoding and returned sparse.

    Returns:
        Tensor of per-value counts (or weight sums).

    Raises:
        ValueError: if `x` is sparse and `weights` is not a BCOO with the
            same indices.
    """
    # Note: bincount is never tracable / jittable because the output shape
    # depends on the values in x.
    if sparse or isinstance(x, jax_sparse.BCOO):
        if isinstance(x, jax_sparse.BCOO):
            if weights is not None:
                if not isinstance(weights, jax_sparse.BCOO):
                    raise ValueError("`x` and `weights` must both be BCOOs")
                if x.indices is not weights.indices:
                    # This test works in eager mode only
                    if not jnp.all(jnp.equal(x.indices, weights.indices)):
                        raise ValueError(
                            "`x` and `weights` BCOOs must have the same indices"
                        )
                # Indices are shared with `x`, so only the values are needed.
                weights = weights.data
            x = x.data
        # For a 2-D batch, reduce over the per-row axis (axis 1).
        reduction_axis = 1 if len(x.shape) > 1 else 0
        maxlength = jnp.maximum(jnp.max(x) + 1, minlength)
        # One-hot then sum: each element contributes 1 (or its weight) to
        # the bin matching its value.
        one_hot_encoding = nn.one_hot(x, maxlength, sparse=True)
        if weights is not None:
            expanded_weights = jnp.expand_dims(weights, reduction_axis + 1)
            one_hot_encoding = one_hot_encoding * expanded_weights

        outputs = jax_sparse.bcoo_reduce_sum(
            one_hot_encoding,
            axes=(reduction_axis,),
        )
        return outputs
    if len(x.shape) == 2:
        # Dense 2-D input: count each row independently, then restack.
        if weights is None:

            def bincount_fn(arr):
                return jnp.bincount(arr, minlength=minlength)

            bincounts = list(map(bincount_fn, x))
        else:

            def bincount_fn(arr_w):
                return jnp.bincount(
                    arr_w[0], weights=arr_w[1], minlength=minlength
                )

            bincounts = list(map(bincount_fn, zip(x, weights)))

        return jnp.stack(bincounts)
    return jnp.bincount(x, weights=weights, minlength=minlength)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def einsum(subscripts, *operands, **kwargs):
    """Einstein summation over `operands` per the `subscripts` spec.

    When every operand is int8 and the caller did not request a specific
    `preferred_element_type`, int32 accumulation is requested so that
    hardware-accelerated integer einsum kernels can be used.

    Fixes over the previous version: a caller-supplied
    `preferred_element_type` kwarg is now respected instead of being
    silently overwritten, and the local dtype list no longer shadows the
    module-level `dtypes` import.
    """
    operands = [convert_to_tensor(x) for x in operands]
    if "preferred_element_type" not in kwargs:
        # When all operands are of int8, specifying `preferred_element_type`
        # as int32 enables hardware-accelerated einsum.
        operand_dtypes = list(
            set(standardize_dtype(x.dtype) for x in operands)
        )
        if len(operand_dtypes) == 1 and operand_dtypes[0] == "int8":
            kwargs["preferred_element_type"] = "int32"
        else:
            kwargs["preferred_element_type"] = None
    return jnp.einsum(subscripts, *operands, **kwargs)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def subtract(x1, x2):
    """Element-wise difference `x1 - x2` (sparse-aware via the decorator)."""
    return jnp.subtract(convert_to_tensor(x1), convert_to_tensor(x2))
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def matmul(x1, x2):
    """Matrix product of `x1` and `x2`, with sparse (BCOO) support."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    # When both x1 and x2 are of int8, specifying `preferred_element_type` as
    # int32 to enable hardware-accelerated matmul
    x1_dtype = standardize_dtype(x1.dtype)
    x2_dtype = standardize_dtype(x2.dtype)
    if x1_dtype == "int8" and x2_dtype == "int8":
        preferred_element_type = "int32"
    else:
        preferred_element_type = None
    if isinstance(x1, jax_sparse.JAXSparse) or isinstance(
        x2, jax_sparse.JAXSparse
    ):
        # Build the sparsified matmul once and cache it as a function
        # attribute so `sparsify` is not re-invoked on every call.
        if not hasattr(matmul, "sparse_matmul"):
            matmul.sparse_matmul = jax_sparse.sparsify(jnp.matmul)
        # NOTE(review): the relayout appears intended to mark all leading
        # dims as batch dims for the sparse matmul — confirm against
        # `bcoo_update_layout` docs.
        if isinstance(x1, jax_sparse.BCOO):
            x1 = jax_sparse.bcoo_update_layout(
                x1, n_batch=len(x1.shape) - 2, on_inefficient="warn"
            )
        if isinstance(x2, jax_sparse.BCOO):
            x2 = jax_sparse.bcoo_update_layout(
                x2, n_batch=len(x2.shape) - 2, on_inefficient="warn"
            )
        return matmul.sparse_matmul(
            x1, x2, preferred_element_type=preferred_element_type
        )

    return jnp.matmul(x1, x2, preferred_element_type=preferred_element_type)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def multiply(x1, x2):
    """Element-wise product with dedicated sparse (BCOO) fast paths."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    if isinstance(x1, jax_sparse.BCOO):
        if isinstance(x2, jax_sparse.BCOO):
            # x1 is sparse, x2 is sparse.
            if x1.indices is x2.indices:
                # `bcoo_multiply_sparse` will not detect that the indices are
                # the same, optimize this case here.
                if not x1.unique_indices:
                    x1 = jax_sparse.bcoo_sum_duplicates(x1)
                    x2 = jax_sparse.bcoo_sum_duplicates(x2)
                # Same index array: multiply the stored values directly.
                return jax_sparse.BCOO(
                    (jnp.multiply(x1.data, x2.data), x1.indices),
                    shape=x1.shape,
                    indices_sorted=True,
                    unique_indices=True,
                )
            else:
                return jax_sparse.bcoo_multiply_sparse(x1, x2)
        else:
            # x1 is sparse, x2 is dense.
            out_data = jax_sparse.bcoo_multiply_dense(x1, x2)
            return jax_sparse.BCOO(
                (out_data, x1.indices),
                shape=x1.shape,
                indices_sorted=x1.indices_sorted,
                unique_indices=x1.unique_indices,
            )
    elif isinstance(x2, jax_sparse.BCOO):
        # x1 is dense, x2 is sparse: multiplication commutes, so reuse the
        # sparse-times-dense kernel with the arguments swapped.
        out_data = jax_sparse.bcoo_multiply_dense(x2, x1)
        return jax_sparse.BCOO(
            (out_data, x2.indices),
            shape=x2.shape,
            indices_sorted=x2.indices_sorted,
            unique_indices=x2.unique_indices,
        )
    return jnp.multiply(x1, x2)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def mean(x, axis=None, keepdims=False):
    """Arithmetic mean along `axis`, with sparse (BCOO) support.

    Integer/bool inputs yield a float result; float inputs keep their
    dtype while the computation runs in at least float32.
    """
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    # `jnp.mean` does not handle low precision (e.g., float16) overflow
    # correctly, so we compute with float32 and cast back to the original type.
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    if "int" in ori_dtype or ori_dtype == "bool":
        result_dtype = compute_dtype
    else:
        result_dtype = ori_dtype
    if isinstance(x, jax_sparse.BCOO):
        if axis is None:
            axis = tuple(range(len(x.shape)))
        (
            canonical_axis,
            keep_dims_shape,
            broadcast_dimensions,
        ) = sparse.axis_shape_dims_for_broadcast_in_dim(
            axis, x.shape, insert_dims=False
        )
        # Mean = sum over the reduced axes divided by their total size.
        divisor = math.prod(x.shape[i] for i in canonical_axis)
        output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis)
        output = jax_sparse.BCOO(
            (output.data.astype(result_dtype) / divisor, output.indices),
            shape=output.shape,
        )
        if keepdims:
            # `bcoo_reduce_sum` does not support keepdims, neither does
            # sparsify(jnp.sum), so we recreate the empty dimensions.
            output = jax_sparse.bcoo_broadcast_in_dim(
                output,
                shape=keep_dims_shape,
                broadcast_dimensions=broadcast_dimensions,
            )
        return output
    else:
        output = jnp.mean(x, axis=axis, keepdims=keepdims, dtype=compute_dtype)
        return cast(output, result_dtype)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def max(x, axis=None, keepdims=False, initial=None):
    """Maximum of tensor elements over `axis` (all elements when None)."""
    return jnp.max(
        convert_to_tensor(x), axis=axis, keepdims=keepdims, initial=initial
    )
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def ones(shape, dtype=None):
    """All-ones tensor of `shape`; `dtype` defaults to the Keras floatx."""
    return jnp.ones(shape, dtype=dtype or config.floatx())
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def zeros(shape, dtype=None):
    """All-zeros tensor of `shape`; `dtype` defaults to the Keras floatx."""
    return jnp.zeros(shape, dtype=dtype or config.floatx())
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@sparse.elementwise_unary(linear=False)
def absolute(x):
    """Element-wise absolute value."""
    return jnp.absolute(convert_to_tensor(x))
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def abs(x):
    """Alias of `absolute` (mirrors the NumPy naming)."""
    return absolute(x)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def all(x, axis=None, keepdims=False):
    """Test whether every element along `axis` evaluates to True."""
    result = jnp.all(x, axis=axis, keepdims=keepdims)
    return result
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def any(x, axis=None, keepdims=False):
    """Test whether at least one element along `axis` evaluates to True."""
    result = jnp.any(x, axis=axis, keepdims=keepdims)
    return result
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def amax(x, axis=None, keepdims=False):
    """Maximum value along `axis` (whole tensor when `axis` is None)."""
    result = jnp.amax(x, axis=axis, keepdims=keepdims)
    return result
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def amin(x, axis=None, keepdims=False):
    """Minimum value along `axis` (whole tensor when `axis` is None)."""
    result = jnp.amin(x, axis=axis, keepdims=keepdims)
    return result
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def append(x1, x2, axis=None):
    """Append the values of `x2` to `x1` along `axis` (flattened when None)."""
    return jnp.append(convert_to_tensor(x1), convert_to_tensor(x2), axis=axis)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def arange(start, stop=None, step=1, dtype=None):
    """Evenly spaced values in `[start, stop)` with the given step.

    When `dtype` is None it is inferred from the dtypes (or, for plain
    Python scalars, the Python types) of `start`, `step` and, if provided,
    `stop`.
    """
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(step, "dtype", type(step)),
        ]
        if stop is not None:
            dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
        dtype = dtypes.result_type(*dtypes_to_resolve)
    dtype = standardize_dtype(dtype)
    return jnp.arange(start, stop, step=step, dtype=dtype)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
@sparse.densifying_unary
|
| 266 |
+
def arccos(x):
|
| 267 |
+
x = convert_to_tensor(x)
|
| 268 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 269 |
+
dtype = config.floatx()
|
| 270 |
+
else:
|
| 271 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 272 |
+
x = cast(x, dtype)
|
| 273 |
+
return jnp.arccos(x)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@sparse.densifying_unary
|
| 277 |
+
def arccosh(x):
|
| 278 |
+
x = convert_to_tensor(x)
|
| 279 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 280 |
+
dtype = config.floatx()
|
| 281 |
+
else:
|
| 282 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 283 |
+
x = cast(x, dtype)
|
| 284 |
+
return jnp.arccosh(x)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@sparse.elementwise_unary(linear=False)
|
| 288 |
+
def arcsin(x):
|
| 289 |
+
x = convert_to_tensor(x)
|
| 290 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 291 |
+
dtype = config.floatx()
|
| 292 |
+
else:
|
| 293 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 294 |
+
x = cast(x, dtype)
|
| 295 |
+
return jnp.arcsin(x)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@sparse.elementwise_unary(linear=False)
|
| 299 |
+
def arcsinh(x):
|
| 300 |
+
x = convert_to_tensor(x)
|
| 301 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 302 |
+
dtype = config.floatx()
|
| 303 |
+
else:
|
| 304 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 305 |
+
x = cast(x, dtype)
|
| 306 |
+
return jnp.arcsinh(x)
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@sparse.elementwise_unary(linear=False)
|
| 310 |
+
def arctan(x):
|
| 311 |
+
x = convert_to_tensor(x)
|
| 312 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 313 |
+
dtype = config.floatx()
|
| 314 |
+
else:
|
| 315 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 316 |
+
x = cast(x, dtype)
|
| 317 |
+
return jnp.arctan(x)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def arctan2(x1, x2):
    """Element-wise arc tangent of `x1 / x2` choosing the correct quadrant."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    # Promote both operands to a common floating-point type first.
    result_dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    return jnp.arctan2(cast(x1, result_dtype), cast(x2, result_dtype))
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
@sparse.elementwise_unary(linear=False)
|
| 330 |
+
def arctanh(x):
|
| 331 |
+
x = convert_to_tensor(x)
|
| 332 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 333 |
+
dtype = config.floatx()
|
| 334 |
+
else:
|
| 335 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 336 |
+
x = cast(x, dtype)
|
| 337 |
+
return jnp.arctanh(x)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def argmax(x, axis=None, keepdims=False):
    """Index of the maximum value along `axis` (flattened when None)."""
    result = jnp.argmax(x, axis=axis, keepdims=keepdims)
    return result
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def argmin(x, axis=None, keepdims=False):
    """Index of the minimum value along `axis` (flattened when None)."""
    result = jnp.argmin(x, axis=axis, keepdims=keepdims)
    return result
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def argsort(x, axis=-1):
    """Indices that would sort `x` along `axis` (flattened for 0-D input)."""
    x = convert_to_tensor(x)
    # A 0-D tensor has no axis to sort along; jnp requires axis=None there.
    return jnp.argsort(x, axis=None if x.ndim == 0 else axis)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def array(x, dtype=None):
    """Create a JAX array from `x`, optionally casting to `dtype`."""
    result = jnp.array(x, dtype=dtype)
    return result
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def average(x, axis=None, weights=None):
    """Weighted average of `x` along `axis`.

    The compute dtype is the common type of `x`, `weights` (when given)
    and float, so integer inputs are promoted before averaging.
    """
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype, float]
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
    dtype = dtypes.result_type(*dtypes_to_resolve)
    x = cast(x, dtype)
    if weights is not None:
        weights = cast(weights, dtype)
    return jnp.average(x, weights=weights, axis=axis)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def bitwise_and(x, y):
|
| 373 |
+
x = convert_to_tensor(x)
|
| 374 |
+
y = convert_to_tensor(y)
|
| 375 |
+
return jnp.bitwise_and(x, y)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def bitwise_invert(x):
|
| 379 |
+
x = convert_to_tensor(x)
|
| 380 |
+
return jnp.invert(x)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def bitwise_not(x):
|
| 384 |
+
return bitwise_invert(x)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def bitwise_or(x, y):
|
| 388 |
+
x = convert_to_tensor(x)
|
| 389 |
+
y = convert_to_tensor(y)
|
| 390 |
+
return jnp.bitwise_or(x, y)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def bitwise_xor(x, y):
|
| 394 |
+
x = convert_to_tensor(x)
|
| 395 |
+
y = convert_to_tensor(y)
|
| 396 |
+
return jnp.bitwise_xor(x, y)
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def bitwise_left_shift(x, y):
|
| 400 |
+
x = convert_to_tensor(x)
|
| 401 |
+
y = convert_to_tensor(y)
|
| 402 |
+
return jnp.left_shift(x, y)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def left_shift(x, y):
|
| 406 |
+
return bitwise_left_shift(x, y)
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def bitwise_right_shift(x, y):
|
| 410 |
+
x = convert_to_tensor(x)
|
| 411 |
+
y = convert_to_tensor(y)
|
| 412 |
+
return jnp.right_shift(x, y)
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def right_shift(x, y):
|
| 416 |
+
return bitwise_right_shift(x, y)
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
def broadcast_to(x, shape):
|
| 420 |
+
x = convert_to_tensor(x)
|
| 421 |
+
return jnp.broadcast_to(x, shape)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
@sparse.elementwise_unary(linear=False)
|
| 425 |
+
def ceil(x):
|
| 426 |
+
x = convert_to_tensor(x)
|
| 427 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 428 |
+
dtype = config.floatx()
|
| 429 |
+
else:
|
| 430 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 431 |
+
x = cast(x, dtype)
|
| 432 |
+
return jnp.ceil(x)
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def clip(x, x_min, x_max):
    """Clamp the values of `x` into the closed interval [x_min, x_max]."""
    x = convert_to_tensor(x)
    # Boolean tensors are promoted to int32 before clipping.
    if standardize_dtype(x.dtype) == "bool":
        x = cast(x, "int32")
    return jnp.clip(x, x_min, x_max)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def concatenate(xs, axis=0):
    """Join a sequence of tensors along an existing axis.

    If every input is a sparse BCOO, the concatenation stays sparse; if
    only some are sparse, those are densified and the result is dense.
    """
    bcoo_count = builtins.sum(isinstance(x, jax_sparse.BCOO) for x in xs)
    if bcoo_count:
        if bcoo_count == len(xs):
            # All sparse: concatenate without densifying.
            axis = canonicalize_axis(axis, len(xs[0].shape))
            return jax_sparse.bcoo_concatenate(xs, dimension=axis)
        else:
            # Mixed sparse/dense: densify the sparse inputs first.
            xs = [
                x.todense() if isinstance(x, jax_sparse.JAXSparse) else x
                for x in xs
            ]
    return jnp.concatenate(xs, axis=axis)
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
@sparse.elementwise_unary(linear=True)
|
| 457 |
+
def conjugate(x):
|
| 458 |
+
x = convert_to_tensor(x)
|
| 459 |
+
return jnp.conjugate(x)
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
@sparse.elementwise_unary(linear=True)
|
| 463 |
+
def conj(x):
|
| 464 |
+
x = convert_to_tensor(x)
|
| 465 |
+
return jnp.conjugate(x)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
@sparse.elementwise_unary(linear=True)
|
| 469 |
+
def copy(x):
|
| 470 |
+
x = convert_to_tensor(x)
|
| 471 |
+
return jnp.copy(x)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
@sparse.densifying_unary
|
| 475 |
+
def cos(x):
|
| 476 |
+
x = convert_to_tensor(x)
|
| 477 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 478 |
+
dtype = config.floatx()
|
| 479 |
+
else:
|
| 480 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 481 |
+
x = cast(x, dtype)
|
| 482 |
+
return jnp.cos(x)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
@sparse.densifying_unary
|
| 486 |
+
def cosh(x):
|
| 487 |
+
x = convert_to_tensor(x)
|
| 488 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 489 |
+
dtype = config.floatx()
|
| 490 |
+
else:
|
| 491 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 492 |
+
x = cast(x, dtype)
|
| 493 |
+
return jnp.cosh(x)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def count_nonzero(x, axis=None):
|
| 497 |
+
return cast(jnp.count_nonzero(x, axis=axis), "int32")
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
|
| 501 |
+
x1 = convert_to_tensor(x1)
|
| 502 |
+
x2 = convert_to_tensor(x2)
|
| 503 |
+
return jnp.cross(
|
| 504 |
+
x1,
|
| 505 |
+
x2,
|
| 506 |
+
axisa=axisa,
|
| 507 |
+
axisb=axisb,
|
| 508 |
+
axisc=axisc,
|
| 509 |
+
axis=axis,
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def cumprod(x, axis=None, dtype=None):
|
| 514 |
+
x = convert_to_tensor(x)
|
| 515 |
+
return jnp.cumprod(x, axis=axis, dtype=dtype)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def cumsum(x, axis=None, dtype=None):
|
| 519 |
+
x = convert_to_tensor(x)
|
| 520 |
+
return jnp.cumsum(x, axis=axis, dtype=dtype)
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def diag(x, k=0):
|
| 524 |
+
x = convert_to_tensor(x)
|
| 525 |
+
return jnp.diag(x, k=k)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def diagflat(x, k=0):
|
| 529 |
+
x = convert_to_tensor(x)
|
| 530 |
+
return jnp.diagflat(x, k=k)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def diagonal(x, offset=0, axis1=0, axis2=1):
|
| 534 |
+
x = convert_to_tensor(x)
|
| 535 |
+
return jnp.diagonal(
|
| 536 |
+
x,
|
| 537 |
+
offset=offset,
|
| 538 |
+
axis1=axis1,
|
| 539 |
+
axis2=axis2,
|
| 540 |
+
)
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
def diff(a, n=1, axis=-1):
|
| 544 |
+
a = convert_to_tensor(a)
|
| 545 |
+
return jnp.diff(a, n=n, axis=axis)
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
@sparse.elementwise_unary(linear=False)
|
| 549 |
+
def digitize(x, bins):
|
| 550 |
+
x = convert_to_tensor(x)
|
| 551 |
+
bins = convert_to_tensor(bins)
|
| 552 |
+
return jnp.digitize(x, bins)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def dot(x, y):
|
| 556 |
+
x = convert_to_tensor(x)
|
| 557 |
+
y = convert_to_tensor(y)
|
| 558 |
+
return jnp.dot(x, y)
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
def empty(shape, dtype=None):
|
| 562 |
+
dtype = dtype or config.floatx()
|
| 563 |
+
return jnp.empty(shape, dtype=dtype)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
def equal(x1, x2):
|
| 567 |
+
x1 = convert_to_tensor(x1)
|
| 568 |
+
x2 = convert_to_tensor(x2)
|
| 569 |
+
return jnp.equal(x1, x2)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
@sparse.densifying_unary
|
| 573 |
+
def exp(x):
|
| 574 |
+
x = convert_to_tensor(x)
|
| 575 |
+
ori_dtype = standardize_dtype(x.dtype)
|
| 576 |
+
if "int" in ori_dtype or ori_dtype == "bool":
|
| 577 |
+
x = cast(x, config.floatx())
|
| 578 |
+
return jnp.exp(x)
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
@sparse.densifying_unary
|
| 582 |
+
def exp2(x):
|
| 583 |
+
x = convert_to_tensor(x)
|
| 584 |
+
ori_dtype = standardize_dtype(x.dtype)
|
| 585 |
+
if "int" in ori_dtype or ori_dtype == "bool":
|
| 586 |
+
x = cast(x, config.floatx())
|
| 587 |
+
return jnp.exp2(x)
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def expand_dims(x, axis):
    """Insert new length-1 axes at position(s) `axis`."""
    x = convert_to_tensor(x)
    if isinstance(x, jax_sparse.BCOO):
        # Sparse tensors have no expand_dims; emulate it with a
        # broadcast_in_dim to the shape with the unit axes inserted.
        (
            _,
            result_shape,
            broadcast_dimensions,
        ) = sparse.axis_shape_dims_for_broadcast_in_dim(
            axis, x.shape, insert_dims=True
        )
        return jax_sparse.bcoo_broadcast_in_dim(
            x, shape=result_shape, broadcast_dimensions=broadcast_dimensions
        )
    return jnp.expand_dims(x, axis)
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
@sparse.elementwise_unary(linear=False)
|
| 607 |
+
def expm1(x):
|
| 608 |
+
x = convert_to_tensor(x)
|
| 609 |
+
ori_dtype = standardize_dtype(x.dtype)
|
| 610 |
+
if "int" in ori_dtype or ori_dtype == "bool":
|
| 611 |
+
x = cast(x, config.floatx())
|
| 612 |
+
return jnp.expm1(x)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def flip(x, axis=None):
    """Reverse element order along `axis` (all axes when None)."""
    result = jnp.flip(x, axis=axis)
    return result
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
@sparse.elementwise_unary(linear=False)
|
| 620 |
+
def floor(x):
|
| 621 |
+
x = convert_to_tensor(x)
|
| 622 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 623 |
+
dtype = config.floatx()
|
| 624 |
+
else:
|
| 625 |
+
dtype = dtypes.result_type(x.dtype, float)
|
| 626 |
+
x = cast(x, dtype)
|
| 627 |
+
return jnp.floor(x)
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
def full(shape, fill_value, dtype=None):
|
| 631 |
+
dtype = dtype or config.floatx()
|
| 632 |
+
return jnp.full(shape, fill_value, dtype=dtype)
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
def full_like(x, fill_value, dtype=None):
    """Tensor with the shape (and, by default, dtype) of `x`, filled with `fill_value`."""
    result = jnp.full_like(x, fill_value, dtype=dtype)
    return result
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
def greater(x1, x2):
|
| 640 |
+
x1 = convert_to_tensor(x1)
|
| 641 |
+
x2 = convert_to_tensor(x2)
|
| 642 |
+
return jnp.greater(x1, x2)
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def greater_equal(x1, x2):
|
| 646 |
+
x1 = convert_to_tensor(x1)
|
| 647 |
+
x2 = convert_to_tensor(x2)
|
| 648 |
+
return jnp.greater_equal(x1, x2)
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
def hstack(xs):
|
| 652 |
+
return jnp.hstack(xs)
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
def identity(n, dtype=None):
|
| 656 |
+
dtype = dtype or config.floatx()
|
| 657 |
+
return jnp.identity(n, dtype=dtype)
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
@sparse.elementwise_unary(linear=True)
|
| 661 |
+
def imag(x):
|
| 662 |
+
x = convert_to_tensor(x)
|
| 663 |
+
return jnp.imag(x)
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def isclose(x1, x2, rtol=1e-5, atol=1e-8, equal_nan=False):
|
| 667 |
+
x1 = convert_to_tensor(x1)
|
| 668 |
+
x2 = convert_to_tensor(x2)
|
| 669 |
+
return jnp.isclose(x1, x2, rtol, atol, equal_nan)
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
@sparse.densifying_unary
|
| 673 |
+
def isfinite(x):
|
| 674 |
+
x = convert_to_tensor(x)
|
| 675 |
+
return jnp.isfinite(x)
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
@sparse.elementwise_unary(linear=False)
|
| 679 |
+
def isinf(x):
|
| 680 |
+
x = convert_to_tensor(x)
|
| 681 |
+
return jnp.isinf(x)
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
@sparse.elementwise_unary(linear=False)
|
| 685 |
+
def isnan(x):
|
| 686 |
+
x = convert_to_tensor(x)
|
| 687 |
+
return jnp.isnan(x)
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
def less(x1, x2):
|
| 691 |
+
x1 = convert_to_tensor(x1)
|
| 692 |
+
x2 = convert_to_tensor(x2)
|
| 693 |
+
return jnp.less(x1, x2)
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
def less_equal(x1, x2):
|
| 697 |
+
x1 = convert_to_tensor(x1)
|
| 698 |
+
x2 = convert_to_tensor(x2)
|
| 699 |
+
return jnp.less_equal(x1, x2)
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
def linspace(
|
| 703 |
+
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
|
| 704 |
+
):
|
| 705 |
+
return jnp.linspace(
|
| 706 |
+
start,
|
| 707 |
+
stop,
|
| 708 |
+
num=num,
|
| 709 |
+
endpoint=endpoint,
|
| 710 |
+
retstep=retstep,
|
| 711 |
+
dtype=dtype,
|
| 712 |
+
axis=axis,
|
| 713 |
+
)
|
| 714 |
+
|
| 715 |
+
|
| 716 |
+
@sparse.densifying_unary
|
| 717 |
+
def log(x):
|
| 718 |
+
x = convert_to_tensor(x)
|
| 719 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 720 |
+
x = cast(x, config.floatx())
|
| 721 |
+
return jnp.log(x)
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
@sparse.densifying_unary
|
| 725 |
+
def log10(x):
|
| 726 |
+
x = convert_to_tensor(x)
|
| 727 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 728 |
+
x = cast(x, config.floatx())
|
| 729 |
+
return jnp.log10(x)
|
| 730 |
+
|
| 731 |
+
|
| 732 |
+
@sparse.elementwise_unary(linear=False)
|
| 733 |
+
def log1p(x):
|
| 734 |
+
x = convert_to_tensor(x)
|
| 735 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 736 |
+
x = cast(x, config.floatx())
|
| 737 |
+
return jnp.log1p(x)
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
@sparse.densifying_unary
|
| 741 |
+
def log2(x):
|
| 742 |
+
x = convert_to_tensor(x)
|
| 743 |
+
if standardize_dtype(x.dtype) == "int64":
|
| 744 |
+
x = cast(x, config.floatx())
|
| 745 |
+
return jnp.log2(x)
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def logaddexp(x1, x2):
|
| 749 |
+
x1 = convert_to_tensor(x1)
|
| 750 |
+
x2 = convert_to_tensor(x2)
|
| 751 |
+
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
|
| 752 |
+
x1 = cast(x1, dtype)
|
| 753 |
+
x2 = cast(x2, dtype)
|
| 754 |
+
return jnp.logaddexp(x1, x2)
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
def logical_and(x1, x2):
|
| 758 |
+
x1 = convert_to_tensor(x1)
|
| 759 |
+
x2 = convert_to_tensor(x2)
|
| 760 |
+
return jnp.logical_and(x1, x2)
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
def logical_not(x):
|
| 764 |
+
x = convert_to_tensor(x)
|
| 765 |
+
return jnp.logical_not(x)
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
def logical_or(x1, x2):
|
| 769 |
+
x1 = convert_to_tensor(x1)
|
| 770 |
+
x2 = convert_to_tensor(x2)
|
| 771 |
+
return jnp.logical_or(x1, x2)
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
|
| 775 |
+
return jnp.logspace(
|
| 776 |
+
start,
|
| 777 |
+
stop,
|
| 778 |
+
num=num,
|
| 779 |
+
endpoint=endpoint,
|
| 780 |
+
base=base,
|
| 781 |
+
dtype=dtype,
|
| 782 |
+
axis=axis,
|
| 783 |
+
)
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
|
| 787 |
+
def maximum(x1, x2):
|
| 788 |
+
x1 = convert_to_tensor(x1)
|
| 789 |
+
x2 = convert_to_tensor(x2)
|
| 790 |
+
return jnp.maximum(x1, x2)
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
def median(x, axis=None, keepdims=False):
    """Median of `x` along `axis`; int64 inputs are promoted to floatx."""
    # axis of jnp.median must be hashable
    if isinstance(axis, list):
        axis = tuple(axis)
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())

    result = jnp.median(x, axis=axis, keepdims=keepdims)

    # TODO: with jax < 0.4.26 jnp.median failed to keepdims when axis is None
    if keepdims is True and axis is None:
        # Re-insert trailing unit dims until the rank matches the input.
        while result.ndim < x.ndim:
            result = jnp.expand_dims(result, axis=-1)
    return result
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
def meshgrid(*x, indexing="xy"):
    """Coordinate matrices built from 1-D coordinate vectors."""
    grids = jnp.meshgrid(*x, indexing=indexing)
    return grids
|
| 812 |
+
|
| 813 |
+
|
| 814 |
+
def min(x, axis=None, keepdims=False, initial=None):
|
| 815 |
+
x = convert_to_tensor(x)
|
| 816 |
+
return jnp.min(x, axis=axis, keepdims=keepdims, initial=initial)
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
|
| 820 |
+
def minimum(x1, x2):
|
| 821 |
+
x1 = convert_to_tensor(x1)
|
| 822 |
+
x2 = convert_to_tensor(x2)
|
| 823 |
+
return jnp.minimum(x1, x2)
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def mod(x1, x2):
|
| 827 |
+
x1 = convert_to_tensor(x1)
|
| 828 |
+
x2 = convert_to_tensor(x2)
|
| 829 |
+
return jnp.mod(x1, x2)
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
def moveaxis(x, source, destination):
|
| 833 |
+
return jnp.moveaxis(x, source=source, destination=destination)
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
|
| 837 |
+
x = convert_to_tensor(x)
|
| 838 |
+
return jnp.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
def ndim(x):
    """Number of dimensions of `x` (0 for scalars)."""
    return jnp.ndim(x)
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
def nonzero(x):
|
| 846 |
+
return jnp.nonzero(x)
|
| 847 |
+
|
| 848 |
+
|
| 849 |
+
def not_equal(x1, x2):
|
| 850 |
+
x1 = convert_to_tensor(x1)
|
| 851 |
+
x2 = convert_to_tensor(x2)
|
| 852 |
+
return jnp.not_equal(x1, x2)
|
| 853 |
+
|
| 854 |
+
|
| 855 |
+
def ones_like(x, dtype=None):
|
| 856 |
+
return jnp.ones_like(x, dtype=dtype)
|
| 857 |
+
|
| 858 |
+
|
| 859 |
+
def zeros_like(x, dtype=None):
|
| 860 |
+
return jnp.zeros_like(x, dtype=dtype)
|
| 861 |
+
|
| 862 |
+
|
| 863 |
+
def outer(x1, x2):
|
| 864 |
+
return jnp.outer(x1, x2)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
def pad(x, pad_width, mode="constant", constant_values=None):
    """Pad `x` per `pad_width` using a numpy-style padding mode.

    `constant_values` is only legal with `mode="constant"`; passing it with
    any other mode raises a ValueError.
    """
    x = convert_to_tensor(x)
    if constant_values is None:
        return jnp.pad(x, pad_width, mode=mode)
    if mode != "constant":
        raise ValueError(
            "Argument `constant_values` can only be "
            "provided when `mode == 'constant'`. "
            f"Received: mode={mode}"
        )
    return jnp.pad(x, pad_width, mode=mode, constant_values=constant_values)
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
def prod(x, axis=None, keepdims=False, dtype=None):
|
| 882 |
+
x = convert_to_tensor(x)
|
| 883 |
+
return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def quantile(x, q, axis=None, method="linear", keepdims=False):
    """Compute the q-th quantile(s) of `x` along `axis`.

    int64 inputs are promoted to floatx before the computation.
    """
    x = convert_to_tensor(x)
    q = convert_to_tensor(q)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())

    result = jnp.quantile(x, q, axis=axis, method=method, keepdims=keepdims)

    # TODO: with jax < 0.4.26 jnp.quantile failed to keepdims when axis is None
    if keepdims is True and axis is None:
        # A non-scalar `q` contributes one extra leading dimension.
        result_ndim = x.ndim + (1 if len(q.shape) > 0 else 0)
        while result.ndim < result_ndim:
            result = jnp.expand_dims(result, axis=-1)
    return result
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
def ravel(x):
|
| 903 |
+
x = convert_to_tensor(x)
|
| 904 |
+
return jnp.ravel(x)
|
| 905 |
+
|
| 906 |
+
|
| 907 |
+
def unravel_index(x, shape):
|
| 908 |
+
x = convert_to_tensor(x)
|
| 909 |
+
return jnp.unravel_index(x, shape)
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
@sparse.elementwise_unary(linear=True)
|
| 913 |
+
def real(x):
|
| 914 |
+
x = convert_to_tensor(x)
|
| 915 |
+
return jnp.real(x)
|
| 916 |
+
|
| 917 |
+
|
| 918 |
+
@sparse.densifying_unary
|
| 919 |
+
def reciprocal(x):
|
| 920 |
+
x = convert_to_tensor(x)
|
| 921 |
+
return jnp.reciprocal(x)
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
def repeat(x, repeats, axis=None):
|
| 925 |
+
x = convert_to_tensor(x)
|
| 926 |
+
return jnp.repeat(x, repeats, axis=axis)
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
def reshape(x, newshape):
    """Reshape `x` to `newshape`, with sparse (BCOO) support."""
    if isinstance(x, jax_sparse.BCOO):
        # Imported lazily to avoid a circular import at module load time.
        from keras.src.ops import operation_utils

        # Resolve the -1 in `new_shape` if applicable and possible
        output_shape = operation_utils.compute_reshape_output_shape(
            x.shape, newshape, "new_shape"
        )
        if None not in output_shape:
            newshape = output_shape
        return jax_sparse.bcoo_reshape(x, new_sizes=newshape)
    return jnp.reshape(x, newshape)
|
| 941 |
+
|
| 942 |
+
|
| 943 |
+
def roll(x, shift, axis=None):
    """Roll tensor elements by `shift` positions along `axis`."""
    rolled = jnp.roll(x, shift, axis=axis)
    return rolled
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
def searchsorted(sorted_sequence, values, side="left"):
    """Return insertion indices of `values` into 1-D `sorted_sequence`.

    Raises:
        ValueError: if `sorted_sequence` is not 1-D.
    """
    if ndim(sorted_sequence) != 1:
        raise ValueError(
            "`searchsorted` only supports 1-D sorted sequences. "
            "You can use `keras.ops.vectorized_map` "
            "to extend it to N-D sequences. Received: "
            f"sorted_sequence.shape={sorted_sequence.shape}"
        )
    return jnp.searchsorted(sorted_sequence, values, side=side)
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
@sparse.elementwise_unary(linear=False)
def sign(x):
    """Return the element-wise sign of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.sign(tensor)
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
@sparse.elementwise_unary(linear=False)
def sin(x):
    """Element-wise sine, with integer/bool inputs promoted to float."""
    x = convert_to_tensor(x)
    # int64 goes to the configured default float; everything else follows
    # the standard float result-type promotion rule.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return jnp.sin(cast(x, target))
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
@sparse.elementwise_unary(linear=False)
def sinh(x):
    """Element-wise hyperbolic sine, promoting non-float inputs to float."""
    x = convert_to_tensor(x)
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return jnp.sinh(cast(x, target))
|
| 984 |
+
|
| 985 |
+
|
| 986 |
+
def size(x):
    """Return the total number of elements in `x`."""
    element_count = jnp.size(x)
    return element_count
|
| 988 |
+
|
| 989 |
+
|
| 990 |
+
def sort(x, axis=-1):
    """Return a sorted copy of `x` along `axis`."""
    tensor = convert_to_tensor(x)
    return jnp.sort(tensor, axis=axis)
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
def split(x, indices_or_sections, axis=0):
    """Split `x` into sub-tensors along `axis`."""
    pieces = jnp.split(x, indices_or_sections, axis=axis)
    return pieces
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
def stack(x, axis=0):
    """Stack a sequence of tensors along a new axis."""
    stacked = jnp.stack(x, axis=axis)
    return stacked
|
| 1001 |
+
|
| 1002 |
+
|
| 1003 |
+
def std(x, axis=None, keepdims=False):
    """Standard deviation of `x` along `axis`; int64 computed as floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.std(x, axis=axis, keepdims=keepdims)
|
| 1008 |
+
|
| 1009 |
+
|
| 1010 |
+
def swapaxes(x, axis1, axis2):
    """Interchange two axes of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.swapaxes(tensor, axis1=axis1, axis2=axis2)
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
def take(x, indices, axis=None):
    """Gather elements of `x` at `indices` along `axis`.

    Indices are densified (`sparse=False`) because `jnp.take` does not
    accept sparse index tensors.
    """
    x = convert_to_tensor(x)
    dense_indices = convert_to_tensor(indices, sparse=False)
    return jnp.take(x, dense_indices, axis=axis)
|
| 1019 |
+
|
| 1020 |
+
|
| 1021 |
+
def take_along_axis(x, indices, axis=None):
    """Gather values from `x` by matching `indices` along `axis`."""
    gathered = jnp.take_along_axis(x, indices, axis=axis)
    return gathered
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
@sparse.elementwise_unary(linear=False)
def tan(x):
    """Element-wise tangent, promoting non-float inputs to float."""
    x = convert_to_tensor(x)
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return jnp.tan(cast(x, target))
|
| 1034 |
+
|
| 1035 |
+
|
| 1036 |
+
@sparse.elementwise_unary(linear=False)
def tanh(x):
    """Element-wise hyperbolic tangent, promoting non-float inputs."""
    x = convert_to_tensor(x)
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return jnp.tanh(cast(x, target))
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
def tensordot(x1, x2, axes=2):
    """Tensor contraction of `x1` and `x2` over the given `axes`."""
    lhs = convert_to_tensor(x1)
    rhs = convert_to_tensor(x2)
    return jnp.tensordot(lhs, rhs, axes=axes)
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
@sparse.elementwise_unary(linear=False)
def round(x, decimals=0):
    """Round `x` to `decimals` decimal places, element-wise.

    Integer inputs with negative `decimals` are handled manually because
    `jnp.round` does not support that combination.
    """
    x = convert_to_tensor(x)

    # jnp.round doesn't support decimals < 0 for integers
    x_dtype = standardize_dtype(x.dtype)
    if "int" in x_dtype and decimals < 0:
        # Scale down, round, scale back up, then restore the int dtype.
        factor = cast(math.pow(10, decimals), config.floatx())
        scaled = jnp.multiply(cast(x, config.floatx()), factor)
        rounded = jnp.divide(jnp.round(scaled), factor)
        return cast(rounded, x_dtype)
    return jnp.round(x, decimals=decimals)
|
| 1068 |
+
|
| 1069 |
+
|
| 1070 |
+
def tile(x, repeats):
    """Construct a tensor by repeating `x` per `repeats`."""
    tiled = jnp.tile(x, repeats)
    return tiled
|
| 1072 |
+
|
| 1073 |
+
|
| 1074 |
+
def trace(x, offset=0, axis1=0, axis2=1):
    """Sum of the diagonal of `x` over axes (`axis1`, `axis2`)."""
    x = convert_to_tensor(x)
    dtype = None
    # TODO: Remove the condition of uint8 and uint16 once we have jax>=0.4.27
    # for both CPU & GPU environments.
    # uint8 and uint16 will be casted to uint32 when jax>=0.4.27 but to int32
    # otherwise.
    if standardize_dtype(x.dtype) in ("bool", "uint8", "uint16"):
        # Pin the accumulator dtype so results are stable across jax versions.
        dtype = "int32"
    return jnp.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
|
| 1084 |
+
|
| 1085 |
+
|
| 1086 |
+
def tri(N, M=None, k=0, dtype=None):
    """Return an (N, M) lower-triangular matrix of ones at/below diagonal k."""
    resolved_dtype = dtype or config.floatx()
    return jnp.tri(N, M=M, k=k, dtype=resolved_dtype)
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
def tril(x, k=0):
    """Zero out elements above the k-th diagonal of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.tril(tensor, k=k)
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
def triu(x, k=0):
    """Zero out elements below the k-th diagonal of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.triu(tensor, k=k)
|
| 1099 |
+
|
| 1100 |
+
|
| 1101 |
+
def trunc(x):
    """Truncate `x` toward zero; ints and bools pass through unchanged."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # Integral values are already truncated by definition.
    if "int" in dtype or dtype == "bool":
        return x
    return jnp.trunc(x)
|
| 1107 |
+
|
| 1108 |
+
|
| 1109 |
+
def vdot(x1, x2):
    """Dot product of the flattened inputs (conjugating complex `x1`)."""
    lhs = convert_to_tensor(x1)
    rhs = convert_to_tensor(x2)
    return jnp.vdot(lhs, rhs)
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
def inner(x1, x2):
    """Inner product over the last axes of `x1` and `x2`."""
    lhs = convert_to_tensor(x1)
    rhs = convert_to_tensor(x2)
    return jnp.inner(lhs, rhs)
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
def vstack(xs):
    """Stack tensors vertically (row-wise)."""
    stacked = jnp.vstack(xs)
    return stacked
|
| 1123 |
+
|
| 1124 |
+
|
| 1125 |
+
def vectorize(pyfunc, *, excluded=None, signature=None):
    """Return a vectorized version of `pyfunc` via `jnp.vectorize`."""
    # `jnp.vectorize` expects a set; normalize the None default here.
    return jnp.vectorize(
        pyfunc,
        excluded=set() if excluded is None else excluded,
        signature=signature,
    )
|
| 1129 |
+
|
| 1130 |
+
|
| 1131 |
+
def where(condition, x1, x2):
    """Select from `x1` where `condition` is true, else from `x2`."""
    selected = jnp.where(condition, x1, x2)
    return selected
|
| 1133 |
+
|
| 1134 |
+
|
| 1135 |
+
@sparse.elementwise_division
def divide(x1, x2):
    """Element-wise true division of `x1` by `x2`."""
    numerator = convert_to_tensor(x1)
    denominator = convert_to_tensor(x2)
    return jnp.divide(numerator, denominator)
|
| 1140 |
+
|
| 1141 |
+
|
| 1142 |
+
def divide_no_nan(x1, x2):
    """Element-wise `x1 / x2`, returning 0 wherever `x2 == 0`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    zero_mask = x2 == 0
    # Substitute 1 for zero denominators before dividing so no inf/nan is
    # ever produced (jnp.where alone would still evaluate the division).
    safe_denominator = jnp.where(zero_mask, 1, x2)
    return jnp.where(zero_mask, 0, jnp.divide(x1, safe_denominator))
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
def true_divide(x1, x2):
    """Alias of `divide` (NumPy-compatible name)."""
    return divide(x1, x2)
|
| 1151 |
+
|
| 1152 |
+
|
| 1153 |
+
def power(x1, x2):
    """Element-wise `x1` raised to the power `x2`."""
    base = convert_to_tensor(x1)
    exponent = convert_to_tensor(x2)
    return jnp.power(base, exponent)
|
| 1157 |
+
|
| 1158 |
+
|
| 1159 |
+
@sparse.elementwise_unary(linear=True)
def negative(x):
    """Element-wise numerical negation of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.negative(tensor)
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
@sparse.elementwise_unary(linear=False)
def square(x):
    """Element-wise square of `x`."""
    tensor = convert_to_tensor(x)
    return jnp.square(tensor)
|
| 1169 |
+
|
| 1170 |
+
|
| 1171 |
+
@sparse.elementwise_unary(linear=False)
def sqrt(x):
    """Element-wise square root; int64 inputs computed as floatx."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        x = cast(x, config.floatx())
    return jnp.sqrt(x)
|
| 1177 |
+
|
| 1178 |
+
|
| 1179 |
+
def squeeze(x, axis=None):
    """Remove size-1 dimensions from `x`; supports sparse BCOO inputs."""
    if isinstance(x, jax_sparse.BCOO):
        if axis is None:
            # bcoo_squeeze needs explicit dimensions; gather all size-1 axes.
            axis = tuple(i for i, d in enumerate(x.shape) if d == 1)
        axis = to_tuple_or_list(axis)
        return jax_sparse.bcoo_squeeze(x, dimensions=axis)
    return jnp.squeeze(x, axis=axis)
|
| 1186 |
+
|
| 1187 |
+
|
| 1188 |
+
def transpose(x, axes=None):
    """Permute the axes of `x`; supports sparse BCOO inputs."""
    x = convert_to_tensor(x)
    if isinstance(x, jax_sparse.BCOO):
        num_dims = len(x.shape)
        if axes is None:
            # Default matches jnp.transpose: reverse all axes.
            permutation = tuple(range(num_dims)[::-1])
        else:
            # bcoo_transpose requires non-negative axis indices.
            permutation = []
            for a in axes:
                a = canonicalize_axis(a, num_dims)
                permutation.append(a)
        return jax_sparse.bcoo_transpose(x, permutation=permutation)
    return jnp.transpose(x, axes=axes)
|
| 1201 |
+
|
| 1202 |
+
|
| 1203 |
+
def var(x, axis=None, keepdims=False):
    """Variance of `x` along `axis`, computed in at least float32."""
    x = convert_to_tensor(x)
    # `jnp.var` does not handle low precision (e.g., float16) overflow
    # correctly, so we compute with float32 and cast back to the original type.
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    result_dtype = dtypes.result_type(x.dtype, float)
    return cast(
        jnp.var(x, axis=axis, keepdims=keepdims, dtype=compute_dtype),
        result_dtype,
    )
|
| 1213 |
+
|
| 1214 |
+
|
| 1215 |
+
def sum(x, axis=None, keepdims=False):
    """Sum of `x` along `axis`; supports sparse BCOO inputs (with keepdims)."""
    x = convert_to_tensor(x)
    if isinstance(x, jax_sparse.BCOO):
        if axis is None:
            # Reduce over every axis when none is given.
            axis = tuple(range(len(x.shape)))
        (
            canonical_axis,
            keep_dims_shape,
            broadcast_dimensions,
        ) = sparse.axis_shape_dims_for_broadcast_in_dim(
            axis, x.shape, insert_dims=False
        )
        output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis)
        if keepdims:
            # `bcoo_reduce_sum` does not support keepdims, neither does
            # sparsify(jnp.sum), so we recreate the empty dimensions.
            output = jax_sparse.bcoo_broadcast_in_dim(
                output,
                shape=keep_dims_shape,
                broadcast_dimensions=broadcast_dimensions,
            )
        return output
    return jnp.sum(x, axis=axis, keepdims=keepdims)
|
| 1238 |
+
|
| 1239 |
+
|
| 1240 |
+
def eye(N, M=None, k=0, dtype=None):
    """Return an (N, M) matrix with ones on the k-th diagonal."""
    resolved_dtype = dtype or config.floatx()
    return jnp.eye(N, M=M, k=k, dtype=resolved_dtype)
|
| 1243 |
+
|
| 1244 |
+
|
| 1245 |
+
def floor_divide(x1, x2):
    """Element-wise division rounded toward negative infinity."""
    numerator = convert_to_tensor(x1)
    denominator = convert_to_tensor(x2)
    return jnp.floor_divide(numerator, denominator)
|
| 1249 |
+
|
| 1250 |
+
|
| 1251 |
+
def logical_xor(x1, x2):
    """Element-wise logical XOR of `x1` and `x2`."""
    lhs = convert_to_tensor(x1)
    rhs = convert_to_tensor(x2)
    return jnp.logical_xor(lhs, rhs)
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
def correlate(x1, x2, mode="valid"):
    """Cross-correlation of two 1-D tensors."""
    lhs = convert_to_tensor(x1)
    rhs = convert_to_tensor(x2)
    return jnp.correlate(lhs, rhs, mode)
|
| 1261 |
+
|
| 1262 |
+
|
| 1263 |
+
def select(condlist, choicelist, default=0):
    """Select elements from `choicelist` by the first true condition."""
    chosen = jnp.select(condlist, choicelist, default=default)
    return chosen
|
| 1265 |
+
|
| 1266 |
+
|
| 1267 |
+
def slogdet(x):
    """Return (sign, log|det|) of `x` as a plain tuple."""
    matrix = convert_to_tensor(x)
    return tuple(jnp.linalg.slogdet(matrix))
|
| 1270 |
+
|
| 1271 |
+
|
| 1272 |
+
def argpartition(x, kth, axis=-1):
    """Indices that would partition `x` around the `kth` element."""
    indices = jnp.argpartition(x, kth, axis)
    return indices
|
| 1274 |
+
|
| 1275 |
+
|
| 1276 |
+
def histogram(x, bins, range):
    """Histogram of `x`; returns (counts, bin_edges).

    NOTE: `range` (the parameter name, part of the public interface)
    shadows the builtin within this function body.
    """
    counts_and_edges = jnp.histogram(x, bins=bins, range=range)
    return counts_and_edges
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/optimizer.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A class for JAX specific optimizer logic.
|
| 2 |
+
|
| 3 |
+
Its purpose is to route around statelessness
|
| 4 |
+
requirements in cond ops used for EMA handling
|
| 5 |
+
and gradient accumulation handling. We do this
|
| 6 |
+
by skipping conditionals entirely.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import jax
|
| 10 |
+
from jax import numpy as jnp
|
| 11 |
+
|
| 12 |
+
from keras.src.optimizers import base_optimizer
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class JaxOptimizer(base_optimizer.BaseOptimizer):
    """JAX-specific gradient application logic.

    Routes around statelessness requirements in `jax.lax.cond` by always
    running the update step and then selecting (via `cond`) whether the
    resulting variable values or the pre-update snapshots are kept.
    """

    def _backend_apply_gradients(self, grads, trainable_variables):
        # Apply `grads` to `trainable_variables`, honoring gradient
        # accumulation and EMA settings without data-dependent branching.
        if self.gradient_accumulation_steps:
            # True only on every `gradient_accumulation_steps`-th call.
            is_update_step = (
                self._iterations + 1
            ) % self.gradient_accumulation_steps == 0
            steps = self.gradient_accumulation_steps

            # Snapshot current values so non-update steps can restore them
            # after the unconditional update below.
            current_trainable_vars_value = [
                v.value for v in trainable_variables
            ]
            current_optimizer_vars_value = [v.value for v in self.variables]

            # `trainable_variables` might have been filtered in previous
            # processing steps, so we need to ensure the correct mapping between
            # `self._accumulated_gradients` and `trainable_variables`
            acc_grads = [
                self._accumulated_gradients[self._get_variable_index(v)]
                for v in trainable_variables
            ]

            # On an update step the accumulators reset to zero; otherwise
            # the incoming grads are folded into them.
            new_g_accs = jax.lax.cond(
                is_update_step,
                lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
                lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
            )

            # On an update step the applied grads are the running average
            # over the accumulation window; otherwise the raw grads
            # (whose effect is discarded below) are used.
            grads = jax.lax.cond(
                is_update_step,
                lambda: [
                    (g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
                ],
                lambda: list(grads),
            )

            # Apply clipping and weight decay.
            grads = self._clip_gradients(grads)
            self._apply_weight_decay(trainable_variables)

            # Unconditionally run the update, then keep either the updated
            # values (update step) or the snapshots (accumulation step).
            self._backend_update_step(
                grads, trainable_variables, self.learning_rate
            )
            new_trainable_vars = jax.lax.cond(
                is_update_step,
                lambda: [v.value for v in trainable_variables],
                lambda: current_trainable_vars_value,
            )
            new_opt_vars = jax.lax.cond(
                is_update_step,
                lambda: [v.value for v in self.variables],
                lambda: current_optimizer_vars_value,
            )

            for value, v in zip(new_trainable_vars, trainable_variables):
                v.assign(value)

            for value, v in zip(new_opt_vars, self.variables):
                v.assign(value)

            for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
                g_acc.assign(n_g_acc)

        else:
            # Apply clipping and weight decay.
            grads = self._clip_gradients(grads)
            self._apply_weight_decay(trainable_variables)

            self._backend_update_step(
                grads, trainable_variables, self.learning_rate
            )

        if self.use_ema:
            self._update_model_variables_moving_average(
                self._trainable_variables
            )
            if self.ema_overwrite_frequency is not None:
                should_overwrite_model_vars = (
                    self.iterations + 1
                ) % self.ema_overwrite_frequency == 0
                # Blend via 0/1 integer masks instead of a conditional so the
                # computation stays traceable.
                should_overwrite_model_vars_int = (
                    should_overwrite_model_vars.astype("int32")
                )
                should_not_overwrite_model_vars_int = jnp.logical_not(
                    should_overwrite_model_vars
                ).astype("int32")
                current_trainable_vars_value = [
                    v.value for v in self._trainable_variables
                ]
                for var, average_var in zip(
                    self._trainable_variables,
                    self._model_variables_moving_average,
                ):
                    var.assign(
                        average_var * should_overwrite_model_vars_int
                        + var.value * should_not_overwrite_model_vars_int
                    )

        self._iterations.assign_add(1)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/rnn.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
|
| 3 |
+
from jax import lax
|
| 4 |
+
from jax import numpy as jnp
|
| 5 |
+
|
| 6 |
+
from keras.src import tree
|
| 7 |
+
from keras.src.backend.common import stateless_scope
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
    """Iterate `step_function` over the time dimension of `inputs`.

    Returns `(last_output, outputs, new_states)`. When `unroll` is True
    the loop is a Python `for`; otherwise `jax.lax.scan` is used inside a
    stateless scope. Masked timesteps propagate the previous output and
    states (or zeros for the output if `zero_output_for_mask`).

    NOTE(review): `input_length` is accepted but unused in this backend.
    """

    def swap_batch_timestep(input_t):
        # Swap the batch and timestep dim for the incoming tensor.
        axes = list(range(len(input_t.shape)))
        axes[0], axes[1] = 1, 0
        return jnp.transpose(input_t, axes)

    if not time_major:
        # Internally everything is time-major: (time, batch, ...).
        inputs = tree.map_structure(swap_batch_timestep, inputs)

    flattened_inputs = tree.flatten(inputs)
    time_steps = flattened_inputs[0].shape[0]

    if mask is not None:
        if mask.dtype != "bool":
            mask = mask.astype("bool")
        if len(mask.shape) == 2:
            # Give the mask a trailing feature axis for broadcasting.
            mask = jnp.expand_dims(mask, axis=-1)
        if not time_major:
            mask = swap_batch_timestep(mask)

    if constants is None:
        constants = []

    def _expand_mask(mask_t, input_t, fixed_dim=1):
        # Broadcast a per-timestep mask to the shape of `input_t`.
        if tree.is_nested(mask_t):
            raise ValueError(
                f"mask_t is expected to be tensor, but got {mask_t}"
            )
        if tree.is_nested(input_t):
            raise ValueError(
                f"input_t is expected to be tensor, but got {input_t}"
            )
        rank_diff = len(input_t.shape) - len(mask_t.shape)
        for _ in range(rank_diff):
            mask_t = jnp.expand_dims(mask_t, -1)
        multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
        return jnp.tile(mask_t, multiples)

    if unroll:
        if not time_steps:
            raise ValueError("Unrolling requires a fixed number of timesteps.")
        states = tuple(initial_states)
        successive_states = []
        successive_outputs = []

        # Process the input tensors. The input tensor need to be split on the
        # time_step dim, and reverse if go_backwards is True. In the case of
        # nested input, the input is flattened and then transformed
        # individually. The result of this will be a tuple of lists, each of
        # the item in tuple is list of the tensor with shape (batch, feature)
        def _process_single_input_t(input_t):
            input_t = unstack(input_t)  # unstack for time_step dim
            if go_backwards:
                input_t.reverse()
            return input_t

        if tree.is_nested(inputs):
            processed_input = tree.map_structure(
                _process_single_input_t, inputs
            )
        else:
            processed_input = (_process_single_input_t(inputs),)

        def _get_input_tensor(time):
            # Re-assemble the (possibly nested) input for one timestep.
            inp = [t_[time] for t_ in processed_input]
            return tree.pack_sequence_as(inputs, inp)

        if mask is not None:
            mask_list = unstack(mask)
            if go_backwards:
                mask_list.reverse()

            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                output, new_states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                tiled_mask_t = _expand_mask(mask_t, output)

                # Masked entries carry forward the previous output
                # (zeros on the very first step).
                if not successive_outputs:
                    prev_output = jnp.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]

                output = jnp.where(tiled_mask_t, output, prev_output)

                # Masked entries keep their previous state values too.
                flat_states = tree.flatten(states)
                flat_new_states = tree.flatten(new_states)
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, s) for s in flat_states
                )
                flat_final_states = tuple(
                    jnp.where(m, s, ps)
                    for m, s, ps in zip(
                        tiled_mask_t, flat_new_states, flat_states
                    )
                )
                states = tree.pack_sequence_as(states, flat_final_states)

                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = jnp.stack(successive_outputs)

        else:  # mask is None
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                output, states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = jnp.stack(successive_outputs)

    else:  # Unroll == False
        if mask is not None:

            def _step(states, current_input):
                # scan body: carry = states, x = (input_t, mask_t).
                current_input, current_mask = current_input
                is_masked = jnp.all(
                    jnp.logical_not(current_mask), axis=-1, keepdims=True
                )

                output_t, new_states = step_function(current_input, states)

                if zero_output_for_mask:
                    masked_outs = jnp.where(
                        is_masked, jnp.zeros_like(output_t), output_t
                    )
                else:
                    # Assume the first state is the previous output.
                    output_tm1 = states[0]
                    masked_outs = jnp.where(is_masked, output_tm1, output_t)

                new_states = [
                    jnp.where(is_masked, s, ns)
                    for s, ns in zip(states, new_states)
                ]
                return (new_states, masked_outs)

            scan_xs = (inputs, mask)

        else:

            def _step(states, current_input):
                output_t, new_states = step_function(current_input, states)
                return new_states, output_t

            scan_xs = inputs

        if stateless_scope.in_stateless_scope():
            # Reuse the existing parent stateless scope.
            scope = contextlib.nullcontext()
        else:
            scope = stateless_scope.StatelessScope()
        with scope:
            # We must use a stateless scope because `scan` will involve
            # JAX tracing -- any variable update at this stage would
            # be a leak.
            new_states, outputs = lax.scan(
                f=_step,
                init=initial_states,
                xs=scan_xs,
                reverse=go_backwards,
            )
        if go_backwards:
            # `scan(reverse=True)` emits outputs in reverse time order.
            outputs = jnp.flip(outputs, axis=0)
        last_output = outputs[-1]

    if not time_major:
        outputs = tree.map_structure(swap_batch_timestep, outputs)

    return last_output, outputs, new_states
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def cudnn_ok(*args, **kwargs):
    """The JAX backend never uses fused cuDNN RNN kernels."""
    return False
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def lstm(*args, **kwargs):
    """Fused LSTM is not implemented for the JAX backend."""
    raise NotImplementedError
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def gru(*args, **kwargs):
    """Fused GRU is not implemented for the JAX backend."""
    raise NotImplementedError
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def unstack(x, axis=0):
    """Split `x` into a list of slices along `axis` (axis dim removed)."""
    length = x.shape[axis]
    return [
        lax.index_in_dim(x, position, axis, keepdims=False)
        for position in range(length)
    ]
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/jax/tensorboard.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.utils.module_utils import jax
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def start_trace(logdir):
    """Begin a JAX profiler trace writing to `logdir` (no-op if falsy)."""
    if not logdir:
        return
    jax.profiler.start_trace(logdir)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def stop_trace(save):
    """Stop the active JAX profiler trace (no-op when `save` is falsy)."""
    if not save:
        return
    jax.profiler.stop_trace()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def start_batch_trace(batch):
    """Open and return a profiler annotation for the given batch.

    The caller is responsible for passing the returned context to
    `stop_batch_trace` to close it.
    """
    annotation = jax.profiler.TraceAnnotation(
        f"Profiled batch {batch}"
    )
    annotation.__enter__()
    return annotation
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def stop_batch_trace(batch_trace_context):
    """Close a profiler annotation returned by `start_batch_trace`."""
    batch_trace_context.__exit__(None, None, None)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/__pycache__/nn.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/random.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import openvino.runtime.opset14 as ov_opset
|
| 3 |
+
from openvino import Type
|
| 4 |
+
|
| 5 |
+
from keras.src.backend.config import floatx
|
| 6 |
+
from keras.src.backend.openvino.core import OPENVINO_DTYPES
|
| 7 |
+
from keras.src.backend.openvino.core import OpenVINOKerasTensor
|
| 8 |
+
from keras.src.backend.openvino.core import convert_to_numpy
|
| 9 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 10 |
+
from keras.src.random.seed_generator import draw_seed
|
| 11 |
+
from keras.src.random.seed_generator import make_default_seed
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample a normal tensor as an OpenVINO constant.

    Values are generated eagerly with NumPy (seeded via `draw_seed`) and
    embedded as a constant node, so each call bakes in fixed values.
    """
    dtype = dtype or floatx()
    seed = draw_seed(seed)
    generator = np.random.default_rng(seed.data)
    samples = generator.normal(size=shape, loc=mean, scale=stddev)
    normal_const = samples.astype(dtype)
    return OpenVINOKerasTensor(ov_opset.constant(normal_const).output(0))
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Build an OpenVINO `random_uniform` node over `shape` in [minval, maxval)."""
    dtype = dtype or floatx()
    ov_type = OPENVINO_DTYPES[dtype]
    seed = draw_seed(seed)
    if isinstance(seed, OpenVINOKerasTensor):
        seed1, seed2 = convert_to_numpy(seed)
    else:
        # NOTE(review): `seed` was already drawn above; drawing again here
        # looks like it advances the generator twice — confirm intended.
        seed1, seed2 = draw_seed(seed).data
    minval_const = ov_opset.constant(minval, dtype=dtype)
    maxval_const = ov_opset.constant(maxval, dtype=dtype)
    if isinstance(shape, tuple):
        shape = list(shape)
    output_shape_const = ov_opset.constant(shape, dtype=Type.i32)
    random_uniform = ov_opset.random_uniform(
        output_shape_const, minval_const, maxval_const, ov_type, seed1, seed2
    )
    return OpenVINOKerasTensor(random_uniform.output(0))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def categorical(logits, num_samples, dtype="int64", seed=None):
    """Not available on the OpenVINO backend."""
    raise NotImplementedError(
        "`categorical` is not supported with openvino backend"
    )
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Not available on the OpenVINO backend."""
    raise NotImplementedError(
        "`randint` is not supported with openvino backend"
    )
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample a normal distribution truncated to mean ± 2*stddev.

    Rejection-samples eagerly with NumPy, then embeds the values as an
    OpenVINO constant node.
    """
    dtype = dtype or floatx()
    seed = draw_seed(seed)
    rng = np.random.default_rng(seed.data)

    lower_bound = mean - 2 * stddev
    upper_bound = mean + 2 * stddev

    flat_shape = np.prod(shape)
    random_numbers = np.empty(0)

    # loop until we have enough valid numbers to fill our desired shape
    while random_numbers.shape[0] < flat_shape:
        # Generate a batch of random numbers from a normal distribution
        candidates = rng.normal(loc=mean, scale=stddev, size=flat_shape)
        # Filter the numbers to keep only those within the specified bounds
        in_bounds = (candidates >= lower_bound) & (candidates <= upper_bound)
        # Append the valid numbers to the result array
        random_numbers = np.append(random_numbers, candidates[in_bounds])

    # Truncate the result array to the desired size and reshape it
    np_array_res = random_numbers[:flat_shape].astype(dtype).reshape(shape)
    return OpenVINOKerasTensor(ov_opset.constant(np_array_res).output(0))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Not available on the OpenVINO backend."""
    raise NotImplementedError(
        "`dropout` is not supported with openvino backend"
    )
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def shuffle(x, axis=0, seed=None):
    """Not available on the OpenVINO backend."""
    raise NotImplementedError(
        "`shuffle` is not supported with openvino backend"
    )
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def gamma(shape, alpha, dtype=None, seed=None):
    """Unsupported on the OpenVINO backend; always raises."""
    message = "`gamma` is not supported with openvino backend"
    raise NotImplementedError(message)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Unsupported on the OpenVINO backend; always raises."""
    message = "`binomial` is not supported with openvino backend"
    raise NotImplementedError(message)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Unsupported on the OpenVINO backend; always raises."""
    message = "`beta` is not supported with openvino backend"
    raise NotImplementedError(message)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/openvino/trainer.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import openvino as ov
|
| 3 |
+
import openvino.runtime.opset14 as ov_opset
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src import callbacks as callbacks_module
|
| 7 |
+
from keras.src import tree
|
| 8 |
+
from keras.src.backend.openvino.core import OPENVINO_DTYPES
|
| 9 |
+
from keras.src.backend.openvino.core import OpenVINOKerasTensor
|
| 10 |
+
from keras.src.backend.openvino.core import get_device
|
| 11 |
+
from keras.src.trainers import trainer as base_trainer
|
| 12 |
+
from keras.src.trainers.data_adapters import data_adapter_utils
|
| 13 |
+
from keras.src.trainers.epoch_iterator import EpochIterator
|
| 14 |
+
from keras.src.utils import traceback_utils
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class OpenVINOTrainer(base_trainer.Trainer):
    """Trainer for the OpenVINO backend.

    OpenVINO is an inference-only backend: only `predict` and
    `predict_on_batch` are functional. Training/evaluation entry points
    (`fit`, `evaluate`, `train_on_batch`, `test_on_batch`, `test_step`)
    raise `NotImplementedError`.
    """

    def __init__(self):
        super().__init__()
        self.test_function = None
        self.predict_function = None
        # Cached compiled model and the device it was compiled for.
        self.ov_compiled_model = None
        self.ov_device = None
        # Nested structures of parameterized inputs/outputs used to build
        # and later un-flatten the OpenVINO graph results.
        self.struct_params = None
        self.struct_outputs = None

    def _unpack_singleton(self, x):
        """Return the sole element of a one-element list/tuple, else `x`."""
        if isinstance(x, (list, tuple)) and len(x) == 1:
            return x[0]
        return x

    def test_step(self, data):
        raise NotImplementedError(
            "`test_step` is not supported with openvino backend"
        )

    def predict_step(self, data):
        """Run inference on one batch and restore the model output structure."""
        x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
        ov_compiled_model = self._get_compiled_model(x)
        flatten_x = tree.flatten(x)
        y_pred = ov_compiled_model(flatten_x)
        # recover structure of the model output
        y_pred = self._unpack_singleton(
            tree.pack_sequence_as(self.struct_outputs, y_pred.to_tuple())
        )
        return y_pred

    def make_test_function(self, force=False):
        """Build and cache `self.test_function` (re-built when `force`)."""
        if self.test_function is not None and not force:
            return self.test_function

        def one_test_step(data):
            data = data[0]
            return self.test_step(data)

        def multi_test_steps(data):
            for single_step_data in data:
                logs = one_test_step([single_step_data])
            return logs

        if self.steps_per_execution > 1:
            test_step = multi_test_steps
        else:
            test_step = one_test_step

        self.test_function = test_step

    def _parameterize_data(self, data):
        """Mirror `data` with OpenVINO `Parameter` nodes of matching shape/dtype.

        Raises:
            TypeError: if `data` (or a nested element) is not a sequence,
                dict, NumPy array/scalar, int, or float.
        """
        if isinstance(data, (list, tuple)):
            parametrize_data = []
            for elem in data:
                param_elem = self._parameterize_data(elem)
                parametrize_data.append(param_elem)
        elif isinstance(data, dict):
            parametrize_data = dict()
            for elem_name, elem in data.items():
                param_elem = self._parameterize_data(elem)
                parametrize_data[elem_name] = param_elem
        elif isinstance(data, (np.ndarray, np.generic)):
            # Checking NumPy types explicitly (instead of `np.isscalar`,
            # which is also True for plain Python int/float/str) keeps
            # objects without a `.dtype` out of this branch, so the
            # int/float branches below are reachable.
            ov_type = OPENVINO_DTYPES[str(data.dtype)]
            ov_shape = list(data.shape)
            param = ov_opset.parameter(shape=ov_shape, dtype=ov_type)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        elif isinstance(data, int):
            param = ov_opset.parameter(shape=[], dtype=ov.Type.i32)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        elif isinstance(data, float):
            param = ov_opset.parameter(shape=[], dtype=ov.Type.f32)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        else:
            # `raise <str>` is invalid in Python 3 (would itself raise
            # TypeError with a confusing message); raise a real exception.
            raise TypeError(
                "Unknown type of input data {}".format(type(data))
            )
        return parametrize_data

    def _get_compiled_model(self, data):
        """Return a compiled OpenVINO model for `data`.

        The compiled model is cached; it is rebuilt when no cached model
        exists or when the target device has changed since compilation.
        """
        if (
            self.ov_compiled_model is not None
            and get_device() == self.ov_device
        ):
            return self.ov_compiled_model

        # remove the previous cached compiled model if exists
        del self.ov_compiled_model

        # prepare parameterized input
        self.struct_params = self._parameterize_data(data)
        # construct OpenVINO graph during calling Keras Model
        self.struct_outputs = self(self.struct_params)

        parameters = []
        for p in tree.flatten(self.struct_params):
            parameters.append(p.output.get_node())
        results = []
        for r in tree.flatten(self.struct_outputs):
            results.append(ov_opset.result(r.output))

        # prepare compiled model from scratch
        ov_model = ov.Model(results=results, parameters=parameters)
        self.ov_compiled_model = ov.compile_model(ov_model, get_device())
        self.ov_device = get_device()
        return self.ov_compiled_model

    def make_predict_function(self, force=False):
        """Build and cache `self.predict_function` (re-built when `force`)."""
        if self.predict_function is not None and not force:
            return self.predict_function

        def one_predict_step(data):
            data = data[0]
            return self.predict_step(data)

        def multi_predict_steps(data):
            outputs = one_predict_step(data[:1])

            for single_step_data in data[1:]:
                step_outputs = one_predict_step([single_step_data])
                # Concatenate per-step outputs along the batch axis.
                outputs = tree.map_structure(
                    lambda t1, t2: np.concatenate([t1, t2]),
                    outputs,
                    step_outputs,
                )
            return outputs

        if self.steps_per_execution > 1:
            predict_step = multi_predict_steps
        else:
            predict_step = one_predict_step

        self.predict_function = predict_step

    def fit(
        self,
        x=None,
        y=None,
        batch_size=None,
        epochs=1,
        verbose="auto",
        callbacks=None,
        validation_split=0.0,
        validation_data=None,
        shuffle=True,
        class_weight=None,
        sample_weight=None,
        initial_epoch=0,
        steps_per_epoch=None,
        validation_steps=None,
        validation_batch_size=None,
        validation_freq=1,
    ):
        raise NotImplementedError(
            "`fit` is not supported with openvino backend"
        )

    @traceback_utils.filter_traceback
    def predict(
        self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
    ):
        """Generate output predictions for the input samples."""
        # Create an iterator that yields batches of input data.
        epoch_iterator = EpochIterator(
            x=x,
            batch_size=batch_size,
            steps_per_epoch=steps,
            shuffle=False,
            steps_per_execution=self.steps_per_execution,
        )

        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_history=True,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )

        def append_to_outputs(batch_outputs, outputs):
            # Accumulate each batch's outputs into per-output lists,
            # preserving the nested output structure.
            if outputs is None:
                outputs = tree.map_structure(
                    lambda batch_output: [batch_output],
                    batch_outputs,
                )
            else:
                tree.map_structure_up_to(
                    batch_outputs,
                    lambda output, batch_output: output.append(batch_output),
                    outputs,
                    batch_outputs,
                )
            return outputs

        self.make_predict_function()
        self.stop_predicting = False
        callbacks.on_predict_begin()
        outputs = None
        for step, data in epoch_iterator.enumerate_epoch():
            callbacks.on_predict_batch_begin(step)
            batch_outputs = self.predict_function(data)
            outputs = append_to_outputs(batch_outputs, outputs)
            callbacks.on_predict_batch_end(step, {"outputs": batch_outputs})
            if self.stop_predicting:
                break
        callbacks.on_predict_end()
        # Join the accumulated batch lists along the batch axis.
        return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)

    @traceback_utils.filter_traceback
    def evaluate(
        self,
        x=None,
        y=None,
        batch_size=None,
        verbose="auto",
        sample_weight=None,
        steps=None,
        callbacks=None,
        return_dict=False,
        **kwargs,
    ):
        raise NotImplementedError(
            "`evaluate` is not supported with openvino backend"
        )

    def train_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        class_weight=None,
        return_dict=False,
    ):
        raise NotImplementedError(
            "`train_on_batch` is not supported with openvino backend"
        )

    def test_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        return_dict=False,
    ):
        raise NotImplementedError(
            "`test_on_batch` is not supported with openvino backend"
        )

    def predict_on_batch(self, x):
        """Run inference on a single batch and return NumPy outputs."""
        self.make_predict_function()
        batch_outputs = self.predict_function([(x,)])
        batch_outputs = tree.map_structure(
            backend.convert_to_numpy, batch_outputs
        )
        return batch_outputs
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.backend.tensorflow import core
|
| 2 |
+
from keras.src.backend.tensorflow import distribution_lib
|
| 3 |
+
from keras.src.backend.tensorflow import image
|
| 4 |
+
from keras.src.backend.tensorflow import linalg
|
| 5 |
+
from keras.src.backend.tensorflow import math
|
| 6 |
+
from keras.src.backend.tensorflow import nn
|
| 7 |
+
from keras.src.backend.tensorflow import numpy
|
| 8 |
+
from keras.src.backend.tensorflow import random
|
| 9 |
+
from keras.src.backend.tensorflow import tensorboard
|
| 10 |
+
from keras.src.backend.tensorflow.core import IS_THREAD_SAFE
|
| 11 |
+
from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
|
| 12 |
+
from keras.src.backend.tensorflow.core import Variable
|
| 13 |
+
from keras.src.backend.tensorflow.core import cast
|
| 14 |
+
from keras.src.backend.tensorflow.core import compute_output_spec
|
| 15 |
+
from keras.src.backend.tensorflow.core import cond
|
| 16 |
+
from keras.src.backend.tensorflow.core import convert_to_numpy
|
| 17 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 18 |
+
from keras.src.backend.tensorflow.core import device_scope
|
| 19 |
+
from keras.src.backend.tensorflow.core import is_tensor
|
| 20 |
+
from keras.src.backend.tensorflow.core import name_scope
|
| 21 |
+
from keras.src.backend.tensorflow.core import random_seed_dtype
|
| 22 |
+
from keras.src.backend.tensorflow.core import scatter
|
| 23 |
+
from keras.src.backend.tensorflow.core import shape
|
| 24 |
+
from keras.src.backend.tensorflow.core import stop_gradient
|
| 25 |
+
from keras.src.backend.tensorflow.core import vectorized_map
|
| 26 |
+
from keras.src.backend.tensorflow.rnn import cudnn_ok
|
| 27 |
+
from keras.src.backend.tensorflow.rnn import gru
|
| 28 |
+
from keras.src.backend.tensorflow.rnn import lstm
|
| 29 |
+
from keras.src.backend.tensorflow.rnn import rnn
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/core.py
ADDED
|
@@ -0,0 +1,670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import tensorflow as tf
|
| 5 |
+
from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
|
| 6 |
+
|
| 7 |
+
from keras.src import tree
|
| 8 |
+
from keras.src.backend.common import KerasVariable
|
| 9 |
+
from keras.src.backend.common import global_state
|
| 10 |
+
from keras.src.backend.common import is_int_dtype
|
| 11 |
+
from keras.src.backend.common import standardize_dtype
|
| 12 |
+
from keras.src.backend.common.backend_utils import slice_along_axis
|
| 13 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 14 |
+
from keras.src.backend.common.name_scope import name_scope as base_name_scope
|
| 15 |
+
from keras.src.backend.common.stateless_scope import StatelessScope
|
| 16 |
+
from keras.src.backend.common.stateless_scope import in_stateless_scope
|
| 17 |
+
from keras.src.backend.common.symbolic_scope import SymbolicScope
|
| 18 |
+
from keras.src.backend.tensorflow.sparse import sparse_to_dense
|
| 19 |
+
from keras.src.utils.naming import auto_name
|
| 20 |
+
|
| 21 |
+
SUPPORTS_SPARSE_TENSORS = True
|
| 22 |
+
# https://github.com/tensorflow/tensorflow/issues/78338
|
| 23 |
+
IS_THREAD_SAFE = False
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Variable(
    KerasVariable,
    tf.__internal__.types.Tensor,
    tf.__internal__.tracking.Trackable,
):
    """TensorFlow backend variable: a `KerasVariable` backed by a `tf.Variable`.

    Also implements TF's Tensor/Trackable interfaces so instances can be
    used directly in TF ops and saved via SavedModel/checkpointing.
    """

    # Signals TF internals to treat this object like a resource variable.
    _should_act_as_resource_variable = True

    @property
    def handle(self):
        # Resource handle of the underlying tf.Variable.
        return self.value.handle

    def _initialize(self, value):
        # Create the backing tf.Variable. `value` may be a concrete value
        # or a zero-argument callable (see _initialize_with_initializer).
        self._value = tf.Variable(
            value,
            dtype=self._dtype,
            trainable=self.trainable,
            name=self.name,
            aggregation=self._map_aggregation(self.aggregation),
        )

    def _initialize_with_initializer(self, initializer):
        # Pass a callable so tf.Variable can defer materialization
        # (e.g. to place the initial value on the right device).
        self._initialize(lambda: initializer(self._shape, dtype=self._dtype))

    def _deferred_initialize(self):
        # Materialize a variable whose creation was deferred; must not run
        # inside a stateless scope, and must not run twice.
        if self._value is not None:
            raise ValueError(f"Variable {self.path} is already initialized.")

        if in_stateless_scope():
            raise ValueError(
                "You are attempting to initialize a variable "
                "while in a stateless scope. This is disallowed. "
                "Make sure that all variables are initialized "
                "before you start using your layer/model objects."
            )
        # init_scope lifts initialization out of any surrounding tf.function.
        with tf.init_scope():
            self._initialize_with_initializer(self._initializer)
            self._initializer = None

    def _direct_assign(self, value):
        # Cast to the variable's dtype before assigning.
        self._value.assign(tf.cast(value, self._value.dtype))

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype)

    def numpy(self):  # noqa: F811
        """Return the variable's current value as a NumPy array."""
        return self.value.numpy()

    @property
    def shape(self):
        # Expose the shape as a tf.TensorShape (base class returns a tuple).
        return tf.TensorShape(super().shape)

    # Overload native accessor.
    def __tf_tensor__(self, dtype=None, name=None):
        return tf.convert_to_tensor(self.value, dtype=dtype, name=name)

    # Methods below are for SavedModel support
    @property
    def _shared_name(self):
        return self.value._shared_name

    def _serialize_to_tensors(self):
        # Delegate to the wrapped tf.Variable; fall back to the standard
        # single-tensor checkpoint layout when it does not implement this.
        try:
            return self.value._serialize_to_tensors()
        except NotImplementedError:
            return {"VARIABLE_VALUE": self.value}

    def _restore_from_tensors(self, restored_tensors):
        # Inverse of _serialize_to_tensors, with the same fallback.
        try:
            return self.value._restore_from_tensors(restored_tensors)
        except NotImplementedError:
            self.assign(restored_tensors["VARIABLE_VALUE"])
            return self.value

    def _copy_trackable_to_cpu(self, object_map):
        # Copy the wrapped variable first, then map `self` to a plain
        # tf.Variable wrapping that copy.
        self.value._copy_trackable_to_cpu(object_map)
        object_map[self] = tf.Variable(object_map[self.value])

    def _export_to_saved_model_graph(
        self, object_map, tensor_map, options, **kwargs
    ):
        # Export the wrapped variable into the SavedModel graph and map
        # `self` to the exported copy.
        resource_list = self.value._export_to_saved_model_graph(
            object_map, tensor_map, options, **kwargs
        )
        object_map[self] = tf.Variable(object_map[self.value])
        return resource_list

    def _write_object_proto(self, proto, options):
        return self.value._write_object_proto(proto, options)

    def _map_aggregation(self, aggregation):
        # Translate Keras aggregation names to tf.VariableAggregation;
        # raises KeyError for unknown names.
        mapping = {
            "none": tf.VariableAggregation.NONE,
            "sum": tf.VariableAggregation.SUM,
            "mean": tf.VariableAggregation.MEAN,
            "only_first_replica": tf.VariableAggregation.ONLY_FIRST_REPLICA,
        }
        return mapping[aggregation]
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def convert_to_tensor(x, dtype=None, sparse=None):
    """Convert `x` to a TF tensor, optionally casting and densifying.

    Args:
        x: Input value (Python/NumPy value, tf.Tensor, or tf.SparseTensor).
        dtype: Optional target dtype (any Keras-standardizable dtype).
        sparse: When explicitly False, a SparseTensor input is densified;
            None leaves sparse inputs as-is.
    """
    # `sparse is not None and not sparse` means "caller explicitly asked
    # for dense" — plain `not sparse` would also trigger on None.
    if isinstance(x, tf.SparseTensor) and sparse is not None and not sparse:
        x = sparse_to_dense(x)
    if dtype is not None:
        dtype = standardize_dtype(dtype)
    if not tf.is_tensor(x):
        if dtype == "bool" or is_int_dtype(dtype):
            # TensorFlow conversion is stricter than other backends, it does not
            # allow ints for bools or floats for ints. We convert without dtype
            # and cast instead.
            x = tf.convert_to_tensor(x)
            return tf.cast(x, dtype)
        return tf.convert_to_tensor(x, dtype=dtype)
    elif dtype is not None and not x.dtype == dtype:
        if isinstance(x, tf.SparseTensor):
            # Re-apply the static shape after casting a SparseTensor.
            x_shape = x.shape
            x = tf.cast(x, dtype)
            x.set_shape(x_shape)
            return x
        return tf.cast(x, dtype=dtype)
    return x
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def convert_to_numpy(x):
    """Convert a tensor-like value to a NumPy array.

    Sparse tensors are densified, IndexedSlices are materialized, and
    ragged tensors are converted to padded dense tensors first.
    """
    if isinstance(x, tf.SparseTensor):
        dense = sparse_to_dense(x)
    elif isinstance(x, tf.IndexedSlices):
        dense = tf.convert_to_tensor(x)
    elif isinstance(x, tf.RaggedTensor):
        dense = x.to_tensor()
    else:
        dense = x
    return np.array(dense)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def is_tensor(x):
    """Return True if `x` is a TensorFlow tensor (delegates to `tf.is_tensor`)."""
    return tf.is_tensor(x)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def shape(x):
    """Always return a tuple shape.

    `tf.shape` will return a `tf.Tensor`, which differs from the tuple return
    type on the torch and jax backends. We write our own method instead which
    always returns a tuple, with integer values when the shape is known, and
    tensor values when the shape is unknown (this is tf specific, as dynamic
    shapes do not apply in other backends).

    Raises:
        ValueError: if `x` has a statically unknown rank.
    """
    if isinstance(x, KerasTensor):
        return x.shape
    if not tf.is_tensor(x):
        x = tf.convert_to_tensor(x)
    if x.shape == tf.TensorShape(None):
        raise ValueError(
            "All tensors passed to `ops.shape` must have a statically known "
            f"rank. Received: x={x} with unknown rank."
        )
    shape = x.shape.as_list()
    dynamic = tf.shape(x)
    for i in range(len(shape)):
        if shape[i] is None:
            try:
                shape[i] = dynamic[i]
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            except Exception:
                # With RaggedTensors, accessing a ragged dimension will fail,
                # we leave it as None.
                pass
    return tuple(shape)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def cast(x, dtype):
    """Cast `x` to `dtype`, preserving the static shape of SparseTensors."""
    dtype = standardize_dtype(dtype)
    if not isinstance(x, tf.SparseTensor):
        return tf.cast(x, dtype=dtype)
    # Re-attach the statically known shape after casting a SparseTensor.
    static_shape = x.shape
    result = tf.cast(x, dtype)
    result.set_shape(static_shape)
    return result
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def compute_output_spec(fn, *args, **kwargs):
    """Trace `fn` symbolically and return its output as `KerasTensor` specs.

    `KerasTensor` arguments are replaced with TF placeholders inside a
    scratch `FuncGraph`, `fn` is traced, and any TF tensors in the result
    are mapped back to `KerasTensor`s carrying shape/dtype/sparseness.
    No real computation is performed.
    """
    # StatelessScope prevents variable updates during tracing;
    # SymbolicScope marks this as symbolic (non-eager) execution.
    with StatelessScope(), SymbolicScope():
        graph_name = auto_name("scratch_graph")
        # Build placeholders and trace inside a throwaway graph so the
        # default (eager/outer) graph is left untouched.
        with tf.__internal__.FuncGraph(graph_name).as_default():

            def convert_keras_tensor_to_tf(x):
                # Replace symbolic Keras tensors with graph placeholders;
                # leave all other arguments (constants, configs) as-is.
                if isinstance(x, KerasTensor):
                    if x.sparse:
                        return tf.compat.v1.sparse_placeholder(
                            shape=x.shape, dtype=x.dtype
                        )
                    else:
                        return tf.compat.v1.placeholder(
                            shape=x.shape, dtype=x.dtype
                        )
                return x

            args, kwargs = tree.map_structure(
                convert_keras_tensor_to_tf, (args, kwargs)
            )
            tf_out = fn(*args, **kwargs)

            def convert_tf_to_keras_tensor(x):
                # Map traced TF tensors back to symbolic KerasTensors,
                # preserving sparseness.
                if tf.is_tensor(x):
                    return KerasTensor(
                        x.shape, x.dtype, sparse=isinstance(x, tf.SparseTensor)
                    )
                return x

            output_spec = tree.map_structure(convert_tf_to_keras_tensor, tf_out)
    return output_spec
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def cond(pred, true_fn, false_fn):
    """Run `true_fn` or `false_fn` depending on `pred`."""
    if isinstance(pred, tf.Variable):
        # Variable predicates go through the regular tf.cond.
        dispatch = tf.cond
    else:
        # Otherwise defer to smart_cond, which also accepts static
        # Python/NumPy predicates.
        dispatch = tf.__internal__.smart_cond.smart_cond
    return dispatch(pred, true_fn=true_fn, false_fn=false_fn)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def vectorized_map(function, elements):
    """Map `function` over the leading axis of `elements` (delegates to `tf.vectorized_map`)."""
    return tf.vectorized_map(function, elements)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def map(f, xs):
    """Map `f` over the leading axis of `xs` (possibly a nested structure).

    NOTE: `f` is called once eagerly on the first element to infer the
    output signature needed by `tf.map_fn` — so `f` must be side-effect
    free for that probe call, and `xs` must be non-empty along axis 0.
    """
    xs = tree.map_structure(convert_to_tensor, xs)

    def get_fn_output_signature(x):
        # Probe call: run `f` on one element and capture the spec of
        # every output tensor.
        out = f(x)
        return tree.map_structure(tf.TensorSpec.from_tensor, out)

    if tree.is_nested(xs):
        # Build a single "first element" with the same nested structure.
        input = tree.pack_sequence_as(xs, [x[0] for x in tree.flatten(xs)])
        fn_output_signature = get_fn_output_signature(input)
        return tf.map_fn(f, xs, fn_output_signature=fn_output_signature)
    else:
        fn_output_signature = get_fn_output_signature(xs[0])
        return tf.map_fn(f, xs, fn_output_signature=fn_output_signature)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over the leading axis of `xs`, carrying state like `jax.lax.scan`.

    Args:
        f: Callable `(carry, x) -> (carry, y)`. The output `y` must have
            the same structure/shape/dtype as `carry` (`init`).
        init: Initial carry (possibly nested).
        xs: Inputs to scan over (possibly nested); may be None if `length`
            is given.
        length: Number of steps; required when `xs` is None.
        reverse: Scan from the last element to the first.
        unroll: True, or a positive int used as `parallel_iterations` for
            the underlying `tf.while_loop`.

    Returns:
        `(final_carry, stacked_ys)`.
    """
    # We have reimplemented `scan` to match the behavior of `jax.lax.scan`
    # Ref: tf.scan, jax.lax.scan
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            raise ValueError(
                "`unroll` must be an positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    if xs is None and length is None:
        raise ValueError("Got no `xs` to scan over and `length` not provided.")

    input_is_sequence = tree.is_nested(xs)
    output_is_sequence = tree.is_nested(init)

    def pack_input(x):
        # Restore the nested structure of one input slice (flat list -> nest).
        return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0]

    def pack_output(x):
        # Restore the nested structure of the carry/outputs.
        return tree.pack_sequence_as(init, x) if output_is_sequence else x[0]

    if xs is None:
        xs_flat = []
        n = int(length)
    else:
        # xs_flat = flatten_input(xs)
        xs_flat = tree.flatten(xs)
        xs_flat = [tf.convert_to_tensor(elem) for elem in xs_flat]
        n = int(length) if length is not None else tf.shape(xs_flat[0])[0]

    # TensorArrays are always flat
    xs_array = [
        tf.TensorArray(
            dtype=x.dtype,
            size=n,
            dynamic_size=False,
            element_shape=x.shape[1:],
            infer_shape=True,
        )
        for x in xs_flat
    ]
    xs_array = [x_a.unstack(x) for x_a, x in zip(xs_array, xs_flat)]

    init_flat = tree.flatten(init)
    carry_flat = [tf.convert_to_tensor(init) for init in init_flat]

    # Store the intermediate values
    # Note: there is a constraint that the output of `f` must have the same
    # shape and dtype as carry (`init`).
    ys_array = [
        tf.TensorArray(
            dtype=carry.dtype,
            size=n,
            dynamic_size=False,
            element_shape=carry.shape,
            infer_shape=True,
        )
        for carry in carry_flat
    ]
    # Size-1 TensorArrays hold the running carry so it can be passed
    # through tf.while_loop; clear_after_read=False allows re-reading.
    carry_array = [
        tf.TensorArray(
            dtype=carry.dtype,
            size=1,
            dynamic_size=False,
            clear_after_read=False,
            element_shape=carry.shape,
            infer_shape=True,
        )
        for carry in carry_flat
    ]
    carry_array = [
        carry.write(0, c) for (carry, c) in zip(carry_array, carry_flat)
    ]

    def loop_body(i, carry_array, ys_array):
        # Read input slice i (if any), read the carry, apply `f`, then
        # write back the new carry and the step output.
        packed_xs = (
            pack_input([xs.read(i) for xs in xs_array])
            if len(xs_array) > 0
            else None
        )
        packed_carry = pack_output([carry.read(0) for carry in carry_array])

        carry, ys = f(packed_carry, packed_xs)

        if ys is not None:
            flat_ys = tree.flatten(ys)
            ys_array = [ys.write(i, v) for (ys, v) in zip(ys_array, flat_ys)]
        if carry is not None:
            flat_carry = tree.flatten(carry)
            carry_array = [
                carry.write(0, v) for (carry, v) in zip(carry_array, flat_carry)
            ]
        # Step forward, or backward when reverse=True.
        next_i = i + 1 if not reverse else i - 1
        return (next_i, carry_array, ys_array)

    if isinstance(unroll, bool):
        # unroll=True means "fully parallel" (n iterations), False means 1.
        unroll = max(n, 1) if unroll else 1

    _, carry_array, ys_array = tf.while_loop(
        lambda i, _1, _2: i >= 0 if reverse else i < n,
        loop_body,
        (n - 1 if reverse else 0, carry_array, ys_array),
        parallel_iterations=unroll,
    )

    ys_flat = [ys.stack() for ys in ys_array]
    carry_flat = [carry.read(0) for carry in carry_array]
    if xs is not None:
        # Propagate the static leading-dimension size (when known) onto
        # the stacked outputs, checking all inputs agree on it.
        n_static = xs_flat[0].get_shape().with_rank_at_least(1)[0]
        if not isinstance(n_static, int):
            for x in xs_flat[1:]:
                n_static.assert_is_compatible_with(
                    x.get_shape().with_rank_at_least(1)[0]
                )
        for r in ys_flat:
            r.set_shape(tf.TensorShape(n_static).concatenate(r.get_shape()[1:]))
    return pack_output(carry_flat), pack_output(ys_flat)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def associative_scan(f, elems, reverse=False, axis=0):
    """Apply an associative binary op `f` as an inclusive prefix scan.

    Args:
        f: callable taking two pytrees (each matching the structure of
            `elems`) and returning a pytree of the same structure. Must be
            associative for the result to be well defined.
        elems: a (possibly nested) structure of tensors to scan over.
        reverse: if True, scan from the last element toward the first.
        axis: axis along which to scan.

    Returns:
        A structure matching `elems` holding the scan results.
    """
    # Implementation is the same as tfp.math.scan_associative
    # with additional checks to ensure similar behavior with jax
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    elems_flat = tree.flatten(elems)
    elems_flat = [tf.convert_to_tensor(elem) for elem in elems_flat]
    if reverse:
        # A reverse scan is a forward scan over flipped inputs, flipped back
        # at the end.
        elems_flat = [tf.reverse(elem, [axis]) for elem in elems_flat]

    def _combine(a_flat, b_flat):
        # Apply the user op on packed (structured) values, then re-flatten.
        a = tree.pack_sequence_as(elems, a_flat)
        b = tree.pack_sequence_as(elems, b_flat)
        c = f(a, b)
        c_flat = tree.flatten(c)
        return c_flat

    def _get_dim(x):
        # Length of `x` along the scan axis.
        return shape(x)[axis]

    # TODO add constant dim check
    num_elems = _get_dim(elems_flat[0])
    if not all(_get_dim(elem) == num_elems for elem in elems_flat[1:]):
        raise ValueError(
            "Array inputs to associative_scan must have the same "
            "first dimension. (saw: {})".format(
                [tf.shape(elem) for elem in elems_flat]
            )
        )

    def _interleave(a, b, axis):
        # [a b c ...] [d e f ...] -> [a d b e c f ...]
        num_elems_a = _get_dim(a)
        num_elems_b = _get_dim(b)

        # Note that interleaving implies rank(a)==rank(b).
        axis = tf.where(axis >= 0, axis, tf.rank(a) + axis)
        axis = (
            int(axis)  # Avoid ndarray values.
            if tf.get_static_value(axis) is not None
            else axis
        )

        def _interleave_with_b(a):
            return tf.reshape(
                # Work around lack of support for Tensor axes in
                # `tf.stack` by using `concat` and `expand_dims` instead.
                tf.concat(
                    [
                        tf.expand_dims(a, axis=axis + 1),
                        tf.expand_dims(b, axis=axis + 1),
                    ],
                    axis=axis + 1,
                ),
                tf.concat(
                    [
                        a.get_shape()[:axis],
                        [2 * num_elems_b],
                        a.get_shape()[axis + 1 :],
                    ],
                    axis=0,
                ),
            )

        # When `a` has one more element than `b`, interleave all but the
        # last element of `a`, then append that last element.
        return tf.cond(
            tf.equal(num_elems_a, num_elems_b + 1),
            lambda: tf.concat(
                [
                    _interleave_with_b(
                        slice_along_axis(a, None, -1, axis=axis)
                    ),
                    slice_along_axis(a, -1, None, axis=axis),
                ],
                axis=axis,
            ),
            lambda: _interleave_with_b(a),
        )

    def _scan(elems):
        # Recursive work-efficient scan: combine adjacent pairs, scan the
        # halved sequence, then combine/interleave back to full length.
        elem_length = _get_dim(elems[0])
        a = [slice_along_axis(elem, 0, -1, step=2, axis=axis) for elem in elems]
        b = [
            slice_along_axis(elem, 1, None, step=2, axis=axis) for elem in elems
        ]
        reduced_elems = _combine(a, b)

        def _handle_base_case_elem_length_two():
            return [
                tf.concat(
                    [slice_along_axis(elem, 0, 1, axis=axis), reduced_elem],
                    axis=axis,
                )
                for (reduced_elem, elem) in zip(reduced_elems, elems)
            ]

        def _handle_base_case_elem_length_three():
            reduced_reduced_elems = _combine(
                reduced_elems,
                [slice_along_axis(elem, 2, 3, axis=axis) for elem in elems],
            )
            return [
                tf.concat(
                    [
                        slice_along_axis(elem, 0, 1, axis=axis),
                        reduced_elem,
                        reduced_reduced_elem,
                    ],
                    axis=axis,
                )
                for (reduced_reduced_elem, reduced_elem, elem) in zip(
                    reduced_reduced_elems, reduced_elems, elems
                )
            ]

        at_base_case = tf.logical_or(
            tf.equal(elem_length, 2), tf.equal(elem_length, 3)
        )

        def _base_case():
            return tf.cond(
                tf.equal(elem_length, 2),
                _handle_base_case_elem_length_two,
                _handle_base_case_elem_length_three,
            )

        def _recursive_case():
            # `odd_elems` holds the scan results at the odd positions
            # (1-indexed) of the original sequence.
            odd_elems = _scan(reduced_elems)

            def _even_length_case():
                return _combine(
                    [
                        slice_along_axis(odd_elem, 0, -1, axis=axis)
                        for odd_elem in odd_elems
                    ],
                    [
                        slice_along_axis(elem, 2, None, 2, axis=axis)
                        for elem in elems
                    ],
                )

            def _odd_length_case():
                return _combine(
                    [odd_elem for odd_elem in odd_elems],
                    [
                        slice_along_axis(elem, 2, None, 2, axis=axis)
                        for elem in elems
                    ],
                )

            results = tf.cond(
                tf.equal(elem_length % 2, 0),
                _even_length_case,
                _odd_length_case,
            )

            # The first element of a scan is the same as the first element
            # of the original `elems`.
            even_elems = [
                tf.concat(
                    [slice_along_axis(elem, 0, 1, axis=axis), result], axis=axis
                )
                for (elem, result) in zip(elems, results)
            ]
            return list(
                builtins.map(
                    lambda a, b: _interleave(a, b, axis=axis),
                    even_elems,
                    odd_elems,
                )
            )

        return tf.cond(at_base_case, _base_case, _recursive_case)

    scans = _scan(elems_flat)
    if reverse:
        scans = [tf.reverse(scanned, [axis]) for scanned in scans]

    return tree.pack_sequence_as(elems, scans)
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
def scatter(indices, values, shape):
    """Scatter `values` into a new tensor of `shape` at `indices`."""
    result = tf.scatter_nd(indices, values, shape)
    return result
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def scatter_update(inputs, indices, updates):
    """Return a copy of `inputs` with `updates` written at `indices`."""
    updated = tf.tensor_scatter_nd_update(inputs, indices, updates)
    return updated
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
def slice(inputs, start_indices, shape):
    """Extract a sub-tensor of size `shape` starting at `start_indices`.

    The name intentionally shadows the builtin `slice`: this module mirrors
    the backend ops API.
    """
    return tf.slice(inputs, start_indices, shape)
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
def slice_update(inputs, start_indices, updates):
    """Return `inputs` with the block at `start_indices` replaced by `updates`."""
    # dynamic_update_slice takes (operand, update, start_indices) — note the
    # argument order differs from this function's signature.
    return dynamic_update_slice(inputs, updates, start_indices)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def switch(index, branches, *operands):
    """Run `branches[index](*operands)`, clamping `index` to a valid branch."""
    idx = convert_to_tensor(index, "int32")
    idx = tf.clip_by_value(idx, 0, len(branches) - 1)

    # Bind each branch as a default argument so every lambda captures its
    # own function (avoids the late-binding closure pitfall; see
    # https://github.com/tensorflow/tensorflow/issues/8776#issuecomment-311383887).
    branch_fns = [(lambda fn=fn: fn(*operands)) for fn in branches]
    return tf.switch_case(idx, branch_fns)
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Repeatedly apply `body` while `cond` holds, via `tf.while_loop`.

    Accepts either a single loop variable or a tuple/list of them; the
    returned value has the same shape (single value vs. tuple) as
    `loop_vars`.
    """
    single_var = not isinstance(loop_vars, (tuple, list))
    packed_vars = (loop_vars,) if single_var else tuple(loop_vars)

    def wrapped_body(*args):
        # Normalize the body's output back into a tuple for tf.while_loop.
        result = body(*args)
        return (result,) if single_var else tuple(result)

    final_vars = tf.while_loop(
        cond,
        wrapped_body,
        packed_vars,
        maximum_iterations=maximum_iterations,
    )
    return final_vars[0] if single_var else final_vars
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def fori_loop(lower, upper, body_fun, init_val):
    """Apply `body_fun(i, val)` for i in [lower, upper) and return the final val."""

    def _step(i, val):
        return i + 1, body_fun(i, val)

    _, final_val = tf.while_loop(
        lambda i, val: i < upper,
        _step,
        (lower, init_val),
    )
    return final_val
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
def stop_gradient(variable):
    """Return `variable` unchanged in the forward pass with gradients blocked."""
    return tf.stop_gradient(variable)
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def unstack(x, num=None, axis=0):
    """Split `x` along `axis` into a list of tensors via `tf.unstack`."""
    pieces = tf.unstack(x, num=num, axis=axis)
    return pieces
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def random_seed_dtype():
    """Dtype string used for RNG seeds on this backend.

    TensorFlow random ops only work on int32/int64 seeds, not uint32,
    hence int64.
    """
    return "int64"
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def custom_gradient(fun):
    """Decorate `fun` with a user-defined gradient via `tf.custom_gradient`."""
    return tf.custom_gradient(f=fun)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
class name_scope(base_name_scope):
    """Keras name scope that mirrors itself onto a `tf.name_scope`."""

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        # Paired TF scope, entered/exited together with this Keras scope.
        self._tf_name_scope = tf.name_scope(name)

    def __enter__(self):
        name_scope_stack = global_state.get_global_attribute(
            "name_scope_stack", default=[], set_to_default=True
        )
        if self.deduplicate and name_scope_stack:
            parent_caller = name_scope_stack[-1].caller
            parent_name = name_scope_stack[-1].name
            if (
                self.caller is not None
                and self.caller is parent_caller
                and self.name == parent_name
            ):
                # Same caller re-entering the same scope: skip pushing a
                # duplicate level (the TF scope is not entered either).
                return self
        name_scope_stack.append(self)
        # Record that we actually entered, so __exit__ knows to pop.
        self._pop_on_exit = True
        self._tf_name_scope.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        super().__exit__(*args, **kwargs)
        if self._pop_on_exit:
            # Only exit the TF scope if __enter__ entered it (i.e. the
            # deduplication early-return above was not taken).
            self._tf_name_scope.__exit__(*args, **kwargs)
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def device_scope(device_name):
    """Context manager placing ops on `device_name` (wraps `tf.device`)."""
    return tf.device(device_name)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/distribution_lib.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""!!!DO NOT USE!!!
|
| 2 |
+
|
| 3 |
+
Distribution related class for Tensorflow backend.
|
| 4 |
+
|
| 5 |
+
This is just a prototype and we might want to unify it
|
| 6 |
+
with other backends in the future.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import tensorflow as tf
|
| 10 |
+
from tensorflow.experimental import dtensor
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def list_devices(device_type=None):
    """Return all the available devices based on the device type.

    Note that this should return the global devices in a distributed setting.

    Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Default to `gpu` or
            `tpu` if available when device_type is not provided. Otherwise will
            return the `cpu` devices.

    Return:
        List of devices that are available for distribute computation.
    """
    requested = device_type.upper() if device_type else None

    # DTensor doesn't support getting global devices, even when knowing the
    # Mesh. Use TF API instead to get global devices. Coordinator service is
    # enabled by default with DTensor, so that list_logical_devices() returns
    # a list of global devices. More context can be found in b/254911601.
    logical_devices = tf.config.list_logical_devices(device_type=requested)
    cpus = [d for d in logical_devices if d.device_type.lower() == "cpu"]
    accelerators = [
        d for d in logical_devices if d.device_type.lower() != "cpu"
    ]
    if requested is None:
        # No explicit filter: prefer accelerators, fall back to CPUs.
        logical_devices = accelerators if accelerators else cpus
    return [
        f"{d.device_type.lower()}:{d.name.split(':')[-1]}"
        for d in logical_devices
    ]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def distribute_value(value, tensor_layout):
    """Distribute `value` according to `tensor_layout`. Not implemented yet."""
    # TODO
    pass
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _to_dtensor_mesh(device_mesh):
    """Convert the DeviceMesh to Tensorflow backend specific Mesh.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `tf.dtensor.Mesh` instance.
    """
    # Pair each mesh axis name with its size, e.g. [("batch", 2), ("model", 4)].
    mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape))
    return dtensor.create_distributed_mesh(
        mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten()
    )
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _to_dtensor_layout(tensor_layout):
    """Convert the TensorLayout to Tensorflow backend specific Sharding.

    Args:
        tensor_layout: TensorLayout instance to convert.

    Returns:
        A `tf.dtensor.Layout` instance.
    """
    if tensor_layout.device_mesh is None:
        raise ValueError(
            "Cannot create sharding when device mesh is not set for "
            "TensorLayout."
        )

    # Axes left unspecified (falsy) are replicated rather than sharded.
    sharding_specs = [
        axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes
    ]
    mesh = _to_dtensor_mesh(tensor_layout.device_mesh)
    return dtensor.Layout(sharding_specs=sharding_specs, mesh=mesh)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/export.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tensorflow as tf
|
| 2 |
+
|
| 3 |
+
from keras.src import layers
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TFExportArchive:
    """TensorFlow-backend piece of the export-archive machinery.

    NOTE(review): `self._tf_trackable` is not defined in this class; it is
    presumably created by the class this is mixed into — confirm against
    the base export archive implementation.
    """

    def track(self, resource):
        # Register a TF `Trackable` (e.g. a Keras layer or model) so its
        # variables are captured by the exported artifact.
        if not isinstance(resource, tf.__internal__.tracking.Trackable):
            raise ValueError(
                "Invalid resource type. Expected an instance of a "
                "TensorFlow `Trackable` (such as a Keras `Layer` or `Model`). "
                f"Received instead an object of type '{type(resource)}'. "
                f"Object received: {resource}"
            )

        if isinstance(resource, layers.Layer):
            # Variables in the lists below are actually part of the trackables
            # that get saved, because the lists are created in __init__.
            variables = resource.variables
            trainable_variables = resource.trainable_variables
            non_trainable_variables = resource.non_trainable_variables
            self._tf_trackable.variables += variables
            self._tf_trackable.trainable_variables += trainable_variables
            self._tf_trackable.non_trainable_variables += (
                non_trainable_variables
            )

    def add_endpoint(self, name, fn, input_signature=None, **kwargs):
        # Wrap `fn` in a `tf.function` with the given signature; autograph
        # is explicitly disabled.
        decorated_fn = tf.function(
            fn, input_signature=input_signature, autograph=False
        )
        return decorated_fn
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/image.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import itertools
|
| 3 |
+
import operator
|
| 4 |
+
|
| 5 |
+
import tensorflow as tf
|
| 6 |
+
|
| 7 |
+
from keras.src import backend
|
| 8 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 9 |
+
|
| 10 |
+
# Interpolation methods accepted by `resize` (passed through to
# `tf.image.resize` as `method`).
RESIZE_INTERPOLATIONS = (
    "bilinear",
    "nearest",
    "lanczos3",
    "lanczos5",
    "bicubic",
    "area",
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def rgb_to_grayscale(images, data_format=None):
|
| 21 |
+
images = convert_to_tensor(images)
|
| 22 |
+
data_format = backend.standardize_data_format(data_format)
|
| 23 |
+
channels_axis = -1 if data_format == "channels_last" else -3
|
| 24 |
+
if len(images.shape) not in (3, 4):
|
| 25 |
+
raise ValueError(
|
| 26 |
+
"Invalid images rank: expected rank 3 (single image) "
|
| 27 |
+
"or rank 4 (batch of images). Received input with shape: "
|
| 28 |
+
f"images.shape={images.shape}"
|
| 29 |
+
)
|
| 30 |
+
# Convert to floats
|
| 31 |
+
original_dtype = images.dtype
|
| 32 |
+
compute_dtype = backend.result_type(images.dtype, float)
|
| 33 |
+
images = tf.cast(images, compute_dtype)
|
| 34 |
+
|
| 35 |
+
# Ref: tf.image.rgb_to_grayscale
|
| 36 |
+
rgb_weights = convert_to_tensor(
|
| 37 |
+
[0.2989, 0.5870, 0.1140], dtype=images.dtype
|
| 38 |
+
)
|
| 39 |
+
images = tf.tensordot(images, rgb_weights, axes=(channels_axis, -1))
|
| 40 |
+
images = tf.expand_dims(images, axis=channels_axis)
|
| 41 |
+
return tf.cast(images, original_dtype)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def rgb_to_hsv(images, data_format=None):
    """Convert float RGB images to HSV, preserving the data format."""
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    rank = len(images.shape)
    if rank not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    channels_first = data_format == "channels_first"
    if channels_first:
        # tf.image ops expect channels-last; move channels to the end.
        perm = (0, 2, 3, 1) if rank == 4 else (1, 2, 0)
        images = tf.transpose(images, perm)
    images = tf.image.rgb_to_hsv(images)
    if channels_first:
        # Restore the original channels-first layout.
        perm = (0, 3, 1, 2) if rank == 4 else (2, 0, 1)
        images = tf.transpose(images, perm)
    return images
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def hsv_to_rgb(images, data_format=None):
    """Convert float HSV images to RGB, preserving the data format."""
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    rank = len(images.shape)
    if rank not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    channels_first = data_format == "channels_first"
    if channels_first:
        # tf.image ops expect channels-last; move channels to the end.
        perm = (0, 2, 3, 1) if rank == 4 else (1, 2, 0)
        images = tf.transpose(images, perm)
    images = tf.image.hsv_to_rgb(images)
    if channels_first:
        # Restore the original channels-first layout.
        perm = (0, 3, 1, 2) if rank == 4 else (2, 0, 1)
        images = tf.transpose(images, perm)
    return images
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize `images` to `size` = (target_height, target_width).

    Optionally center-crops (`crop_to_aspect_ratio`) or pads
    (`pad_to_aspect_ratio`) first so the target aspect ratio is reached
    without distortion. Only `fill_mode="constant"` is supported for
    padding. Accepts rank-3 (single image) or rank-4 (batch) inputs in
    either data format; the output keeps the input's data format.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in RESIZE_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
        )
    if fill_mode != "constant":
        raise ValueError(
            "Invalid value for argument `fill_mode`. Only `'constant'` "
            f"is supported. Received: fill_mode={fill_mode}"
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if not len(size) == 2:
        raise ValueError(
            "Argument `size` must be a tuple of two elements "
            f"(height, width). Received: size={size}"
        )
    size = tuple(size)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if data_format == "channels_first":
        # tf.image.resize expects channels-last; transpose and restore later.
        if len(images.shape) == 4:
            images = tf.transpose(images, (0, 2, 3, 1))
        else:
            images = tf.transpose(images, (1, 2, 0))

    if crop_to_aspect_ratio:
        # Compute the largest centered crop that matches the target aspect
        # ratio, then slice it out before resizing.
        shape = tf.shape(images)
        height, width = shape[-3], shape[-2]
        target_height, target_width = size
        crop_height = tf.cast(
            tf.cast(width * target_height, "float32") / target_width,
            "int32",
        )
        crop_height = tf.maximum(tf.minimum(height, crop_height), 1)
        crop_height = tf.cast(crop_height, "int32")
        crop_width = tf.cast(
            tf.cast(height * target_width, "float32") / target_height,
            "int32",
        )
        crop_width = tf.maximum(tf.minimum(width, crop_width), 1)
        crop_width = tf.cast(crop_width, "int32")

        # Center the crop box (integer division rounds toward zero here).
        crop_box_hstart = tf.cast(
            tf.cast(height - crop_height, "float32") / 2, "int32"
        )
        crop_box_wstart = tf.cast(
            tf.cast(width - crop_width, "float32") / 2, "int32"
        )
        if len(images.shape) == 4:
            images = images[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
        else:
            images = images[
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
    elif pad_to_aspect_ratio:
        # Compute the smallest padded canvas matching the target aspect
        # ratio, then pad symmetrically with `fill_value`.
        shape = tf.shape(images)
        height, width = shape[-3], shape[-2]
        target_height, target_width = size
        pad_height = tf.cast(
            tf.cast(width * target_height, "float32") / target_width,
            "int32",
        )
        pad_height = tf.maximum(height, pad_height)
        pad_height = tf.cast(pad_height, "int32")
        pad_width = tf.cast(
            tf.cast(height * target_width, "float32") / target_height,
            "int32",
        )
        pad_width = tf.maximum(width, pad_width)
        pad_width = tf.cast(pad_width, "int32")

        img_box_hstart = tf.cast(
            tf.cast(pad_height - height, "float32") / 2, "int32"
        )
        img_box_wstart = tf.cast(
            tf.cast(pad_width - width, "float32") / 2, "int32"
        )
        # NOTE(review): padding adds `img_box_hstart` rows on BOTH sides, so
        # the padded height is height + 2*hstart, which is one short of
        # `pad_height` when the difference is odd — confirm intended.
        # NOTE(review): the width-padding branch uses the pre-height-pad
        # `height` for the fill blocks' shape; if height padding was applied
        # first, the concat shapes may disagree — verify upstream.
        if len(images.shape) == 4:
            batch_size = tf.shape(images)[0]
            channels = tf.shape(images)[3]
            padded_img = tf.cond(
                img_box_hstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (batch_size, img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        images,
                        tf.ones(
                            (batch_size, img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=1,
                ),
                lambda: images,
            )
            padded_img = tf.cond(
                img_box_wstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (batch_size, height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        padded_img,
                        tf.ones(
                            (batch_size, height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=2,
                ),
                lambda: padded_img,
            )
        else:
            channels = tf.shape(images)[2]
            padded_img = tf.cond(
                img_box_hstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        images,
                        tf.ones(
                            (img_box_hstart, width, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=0,
                ),
                lambda: images,
            )
            padded_img = tf.cond(
                img_box_wstart > 0,
                lambda: tf.concat(
                    [
                        tf.ones(
                            (height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                        padded_img,
                        tf.ones(
                            (height, img_box_wstart, channels),
                            dtype=images.dtype,
                        )
                        * fill_value,
                    ],
                    axis=1,
                ),
                lambda: padded_img,
            )
        images = padded_img

    resized = tf.image.resize(
        images, size, method=interpolation, antialias=antialias
    )
    if data_format == "channels_first":
        # Restore the caller's channels-first layout.
        if len(images.shape) == 4:
            resized = tf.transpose(resized, (0, 3, 1, 2))
        elif len(images.shape) == 3:
            resized = tf.transpose(resized, (2, 0, 1))
    return resized
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
# Interpolation methods accepted by `affine_transform` (passed through to
# `tf.raw_ops.ImageProjectiveTransformV3`).
AFFINE_TRANSFORM_INTERPOLATIONS = (
    "nearest",
    "bilinear",
)
# Fill modes accepted by `affine_transform`.
AFFINE_TRANSFORM_FILL_MODES = (
    "constant",
    "nearest",
    "wrap",
    # "mirror", not supported by TF
    "reflect",
)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Apply affine transform(s) to image(s) via TF's projective-transform op.

    Args:
        images: rank-3 (single image) or rank-4 (batch) tensor.
        transform: rank-1 (single) or rank-2 (batch) tensor of projective
            transform parameters, cast to float32 before the op.
        interpolation: one of `AFFINE_TRANSFORM_INTERPOLATIONS`.
        fill_mode: one of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: value used outside the input for `"constant"` fill mode.
        data_format: `"channels_first"` or `"channels_last"` (standardized
            via `backend.standardize_data_format`).

    Returns:
        Transformed images with the same rank and data format as the input.

    Raises:
        ValueError: on unsupported `interpolation`/`fill_mode` or invalid
            ranks of `images`/`transform`.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if len(transform.shape) not in (1, 2):
        raise ValueError(
            "Invalid transform rank: expected rank 1 (single transform) "
            "or rank 2 (batch of transforms). Received input with shape: "
            f"transform.shape={transform.shape}"
        )
    # unbatched case
    need_squeeze = False
    if len(images.shape) == 3:
        images = tf.expand_dims(images, axis=0)
        need_squeeze = True
    if len(transform.shape) == 1:
        transform = tf.expand_dims(transform, axis=0)

    # The raw op only works in channels_last layout.
    if data_format == "channels_first":
        images = tf.transpose(images, (0, 2, 3, 1))

    affined = tf.raw_ops.ImageProjectiveTransformV3(
        images=images,
        transforms=tf.cast(transform, dtype=tf.float32),
        output_shape=tf.shape(images)[1:-1],
        fill_value=fill_value,
        interpolation=interpolation.upper(),
        fill_mode=fill_mode.upper(),
    )
    # The raw op loses static shape information; restore it.
    affined = tf.ensure_shape(affined, images.shape)

    # Undo the layout/batching adjustments made above.
    if data_format == "channels_first":
        affined = tf.transpose(affined, (0, 3, 1, 2))
    if need_squeeze:
        affined = tf.squeeze(affined, axis=0)
    return affined
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def _mirror_index_fixer(index, size):
    """Fold out-of-range indices into [0, size) with mirror symmetry.

    Implements a scaled, integer-valued triangular wave whose
    half-wavelength is ``size - 1``.
    """
    half_period = size - 1
    return tf.abs(((index + half_period) % (2 * half_period)) - half_period)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _reflect_index_fixer(index, size):
    """Fold out-of-range indices into [0, size) with 'reflect' symmetry.

    Expressed in terms of the mirror fixer on a doubled, half-sample
    shifted grid.
    """
    doubled = _mirror_index_fixer(2 * index + 1, 2 * size + 1)
    return tf.math.floordiv(doubled - 1, 2)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
# Maps each supported `fill_mode` to a callable folding an out-of-range
# index back into [0, size). For "constant", indices are left untouched:
# out-of-range samples are masked out separately (see `is_valid` inside
# `map_coordinates`).
_INDEX_FIXERS = {
    "constant": lambda index, size: index,
    "nearest": lambda index, size: tf.clip_by_value(index, 0, size - 1),
    "wrap": lambda index, size: index % size,
    "mirror": _mirror_index_fixer,
    "reflect": _reflect_index_fixer,
}
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def _nearest_indices_and_weights(coordinate):
    """Return the single (index, weight) node for order-0 interpolation."""
    if coordinate.dtype.is_integer:
        rounded = coordinate
    else:
        rounded = tf.round(coordinate)
    index = tf.cast(rounded, tf.int32)
    # Nearest-neighbor contributes with full weight.
    weight = tf.constant(1, coordinate.dtype)
    return [(index, weight)]
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def _linear_indices_and_weights(coordinate):
    """Return the two (index, weight) nodes for order-1 interpolation."""
    floor_coord = tf.floor(coordinate)
    frac = coordinate - floor_coord
    base = tf.cast(floor_coord, tf.int32)
    # Weights are the complementary fractional distances to the two
    # surrounding grid points.
    return [(base, 1 - frac), (base + 1, frac)]
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def map_coordinates(
    inputs, coordinates, order, fill_mode="constant", fill_value=0.0
):
    """Sample `inputs` at (fractional) `coordinates` via interpolation.

    Args:
        inputs: N-dimensional tensor to sample from.
        coordinates: tensor whose leading dimension equals the rank of
            `inputs`; each slice along axis 0 holds coordinates for one
            input dimension.
        order: interpolation order; 0 (nearest) or 1 (linear) only.
        fill_mode: one of the keys of `_INDEX_FIXERS`.
        fill_value: value used for out-of-range samples in "constant" mode.

    Returns:
        Tensor of sampled values with `inputs`' dtype.

    Raises:
        ValueError: on mismatched coordinate rank or unknown `fill_mode`.
        NotImplementedError: for `order > 1`.
    """
    input_arr = convert_to_tensor(inputs)
    coordinate_arrs = convert_to_tensor(coordinates)

    if coordinate_arrs.shape[0] != len(input_arr.shape):
        raise ValueError(
            "First dim of `coordinates` must be the same as the rank of "
            "`inputs`. "
            f"Received inputs with shape: {input_arr.shape} and coordinate "
            f"leading dim of {coordinate_arrs.shape[0]}"
        )
    if len(coordinate_arrs.shape) < 2:
        raise ValueError(
            "Invalid coordinates rank: expected at least rank 2."
            f" Received input with shape: {coordinate_arrs.shape}"
        )

    # unstack into a list of tensors for following operations
    coordinate_arrs = tf.unstack(coordinate_arrs, axis=0)
    fill_value = convert_to_tensor(tf.cast(fill_value, input_arr.dtype))

    index_fixer = _INDEX_FIXERS.get(fill_mode)
    if index_fixer is None:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected one of "
            f"{set(_INDEX_FIXERS.keys())}. Received: "
            f"fill_mode={fill_mode}"
        )

    def is_valid(index, size):
        # In "constant" mode out-of-range taps must be masked and replaced
        # by `fill_value`; in every other mode the index fixer already
        # folds the index into range, so all taps are valid.
        if fill_mode == "constant":
            return (0 <= index) & (index < size)
        else:
            return True

    if order == 0:
        interp_fun = _nearest_indices_and_weights
    elif order == 1:
        interp_fun = _linear_indices_and_weights
    else:
        raise NotImplementedError("map_coordinates currently requires order<=1")

    # Per input dimension, collect (fixed_index, validity, weight) nodes.
    valid_1d_interpolations = []
    for coordinate, size in zip(coordinate_arrs, input_arr.shape):
        interp_nodes = interp_fun(coordinate)
        valid_interp = []
        for index, weight in interp_nodes:
            fixed_index = index_fixer(index, size)
            # Validity is judged on the *unfixed* index.
            valid = is_valid(index, size)
            valid_interp.append((fixed_index, valid, weight))
        valid_1d_interpolations.append(valid_interp)

    # Accumulate the weighted contributions over the Cartesian product of
    # per-dimension taps (2^rank terms for linear, 1 for nearest).
    outputs = []
    for items in itertools.product(*valid_1d_interpolations):
        indices, validities, weights = zip(*items)
        indices = tf.transpose(tf.stack(indices))

        def fast_path():
            # All taps in range: plain gather.
            return tf.transpose(tf.gather_nd(input_arr, indices))

        def slow_path():
            # Some taps may be out of range: mask them to `fill_value`.
            all_valid = functools.reduce(operator.and_, validities)
            return tf.where(
                all_valid,
                tf.transpose(tf.gather_nd(input_arr, indices)),
                fill_value,
            )

        contribution = tf.cond(tf.reduce_all(validities), fast_path, slow_path)
        outputs.append(
            functools.reduce(operator.mul, weights)
            * tf.cast(contribution, weights[0].dtype)
        )
    result = functools.reduce(operator.add, outputs)
    # Round before casting back so integer inputs round-trip sensibly.
    if input_arr.dtype.is_integer:
        result = result if result.dtype.is_integer else tf.round(result)
    return tf.cast(result, input_arr.dtype)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/layer.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tensorflow as tf
|
| 2 |
+
|
| 3 |
+
from keras.src import tree
|
| 4 |
+
from keras.src.backend.tensorflow.trackable import KerasAutoTrackable
|
| 5 |
+
from keras.src.utils import tf_utils
|
| 6 |
+
from keras.src.utils import tracking
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TFLayer(KerasAutoTrackable):
    """TensorFlow-backend mixin adding SavedModel export support to layers."""

    def __init__(self, *args, **kwargs):
        # Export-related attributes
        self._saved_model_inputs_spec = None
        self._saved_model_arg_spec = None
        # Attribute names whose tracked containers are exposed to the
        # checkpointing machinery via `_trackable_children`.
        self._tracked = []

    @tf.__internal__.tracking.no_automatic_dependency_tracking
    def _set_save_spec(self, inputs, args=None, kwargs=None):
        """Defines the save spec so that serialization can trace layer calls.

        The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
        saved into a tuple of `([inputs] + args, kwargs)`.

        Args:
            inputs: possibly nested inputs passed into the call function.
            args: a list of positional arguments passed into call.
            kwargs: a dictionary of keyword arguments passed into call.
        """
        if self._saved_model_inputs_spec is not None:
            return  # Already set.

        inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs)
        args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or [])
        kwargs_spec = {}
        # Filter out non-tensor arguments from kwargs.
        for key, kwarg in kwargs.items():
            flat_kwarg = tree.flatten(kwarg)
            flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
            if any(s is None for s in flat_specs):
                # At least one leaf is not spec-able: drop the whole kwarg.
                continue
            kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs)

        self._saved_model_inputs_spec = inputs_spec
        self._saved_model_arg_spec = (
            [inputs_spec] + list(args_spec),
            kwargs_spec,
        )

    def _trackable_children(self, save_type="checkpoint", **kwargs):
        """Return trackable children, hiding execution fns from SavedModel."""
        if save_type == "savedmodel":
            # SavedModel needs to ignore the execution functions.
            # Temporarily null them out so the parent class does not
            # serialize them, then restore afterwards.
            train_function = getattr(self, "train_function", None)
            test_function = getattr(self, "test_function", None)
            predict_function = getattr(self, "predict_function", None)
            self.train_function = None
            self.test_function = None
            self.predict_function = None

        children = super()._trackable_children(save_type, **kwargs)

        if save_type == "savedmodel":
            self.train_function = train_function
            self.test_function = test_function
            self.predict_function = predict_function

        # Expose keras tracking containers as plain list/dict so TF's
        # checkpoint machinery can traverse them.
        for tracked_attr in self._tracked:
            tracked_item = getattr(self, tracked_attr)
            if isinstance(tracked_item, tracking.TrackedList):
                children[tracked_attr] = list(tracked_item)
            if isinstance(tracked_item, tracking.TrackedDict):
                children[tracked_attr] = dict(tracked_item)
            if isinstance(tracked_item, tracking.TrackedSet):
                # Sets are exported as lists; NOTE(review): ordering is
                # whatever iteration yields — presumably acceptable here.
                children[tracked_attr] = list(tracked_item)

        return children

    @property
    def _default_save_signature(self):
        """For SavedModel support: returns the default serving signature."""

        # Imported lazily to avoid a circular import at module load time.
        from keras.src.models.functional import Functional
        from keras.src.models.model import Model
        from keras.src.models.sequential import Sequential

        if not isinstance(self, Model):
            return None

        inputs = None
        if (
            isinstance(self, Sequential)
            and getattr(self, "_functional", None) is not None
        ):
            inputs = self._functional.input
        elif isinstance(self, Functional):
            inputs = self.input

        if inputs is not None:
            input_signature = (
                tree.map_structure(
                    lambda x: tf.TensorSpec(x.shape, x.dtype), inputs
                ),
            )
        else:
            # Subclassed model: derive the signature from build shapes.
            input_signature = tuple(
                tree.map_shape_structure(
                    lambda s: tf.TensorSpec(s, self.input_dtype), value
                )
                for value in self._build_shapes_dict.values()
            )

        @tf.function(input_signature=input_signature)
        def serving_default(inputs):
            return self(inputs)

        return serving_default
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/linalg.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tensorflow as tf
|
| 2 |
+
|
| 3 |
+
from keras.src.backend import config
|
| 4 |
+
from keras.src.backend import standardize_dtype
|
| 5 |
+
from keras.src.backend.common import dtypes
|
| 6 |
+
from keras.src.backend.tensorflow.core import cast
|
| 7 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def cholesky(a):
    """Cholesky factorization that errors on non-positive-definite input."""
    factor = tf.linalg.cholesky(a)
    # tf.linalg.cholesky simply returns NaNs for non-positive definite
    # matrices; turn those NaNs into an explicit error.
    return tf.debugging.check_numerics(factor, "Cholesky")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def det(a):
    """Determinant, delegated to `tf.linalg.det`."""
    return tf.linalg.det(a)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def eig(a):
    """Eigendecomposition of a general matrix via `tf.linalg.eig`."""
    return tf.linalg.eig(a)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def eigh(a):
    """Eigendecomposition of a Hermitian matrix via `tf.linalg.eigh`."""
    return tf.linalg.eigh(a)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def inv(a):
    """Matrix inverse, delegated to `tf.linalg.inv`."""
    return tf.linalg.inv(a)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def lu_factor(a):
    """LU factorization returning (lu, pivots).

    TF returns the permutation directly; invert it so the pivots follow
    the convention expected by the caller.
    """
    lu_matrix, permutation = tf.linalg.lu(a)
    pivots = tf.math.invert_permutation(permutation)
    return lu_matrix, pivots
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def norm(x, ord=None, axis=None, keepdims=False):
    """Vector/matrix norm mirroring `numpy.linalg.norm` semantics.

    Args:
        x: input tensor.
        ord: norm order. For vectors (one axis): None/2, +/-inf, 0, or any
            numeric p. For matrices (two axes): None/"fro", +/-1, +/-inf,
            "nuc", 2, -2.
        axis: int, tuple of ints, or None (all axes).
        keepdims: whether reduced axes are kept with size 1.

    Returns:
        The requested norm, in a floating dtype.

    Raises:
        ValueError: for out-of-range axes, invalid `ord`, or more than
            two reduction axes.
    """
    from keras.src.backend.tensorflow.numpy import moveaxis

    x = convert_to_tensor(x)
    x_shape = x.shape
    ndim = x_shape.rank

    if axis is None:
        axis = tuple(range(ndim))
    elif isinstance(axis, int):
        axis = (axis,)
    if any(a < -ndim or a >= ndim for a in axis):
        raise ValueError(
            "All `axis` values must be in the range [-ndim, ndim). "
            f"Received inputs with ndim={ndim}, while axis={axis}"
        )
    # A single axis collapses back to an int so the vector branch is taken.
    axis = axis[0] if len(axis) == 1 else axis
    num_axes = 1 if isinstance(axis, int) else len(axis)

    # Promote to a floating dtype before reducing.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)

    # Ref: jax.numpy.linalg.norm
    if num_axes == 1:
        # --- Vector norms ---
        if ord is None or ord == 2:
            return tf.sqrt(
                tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)
            )
        elif ord == float("inf"):
            return tf.math.reduce_max(
                tf.math.abs(x), axis=axis, keepdims=keepdims
            )
        elif ord == float("-inf"):
            return tf.math.reduce_min(
                tf.math.abs(x), axis=axis, keepdims=keepdims
            )
        elif ord == 0:
            # "0-norm": count of non-zero entries.
            return tf.math.reduce_sum(
                tf.cast(tf.not_equal(x, 0), dtype=x.dtype),
                axis=axis,
                keepdims=keepdims,
            )
        elif isinstance(ord, str):
            raise ValueError(
                f"Invalid `ord` argument for vector norm. Received: ord={ord}"
            )
        else:
            # General p-norm: (sum |x|^p)^(1/p).
            ord = convert_to_tensor(ord, dtype=x.dtype)
            out = tf.math.reduce_sum(
                tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims
            )
            return tf.pow(out, 1.0 / ord)
    elif num_axes == 2:
        # --- Matrix norms ---
        row_axis, col_axis = axis[0], axis[1]
        row_axis = row_axis + ndim if row_axis < 0 else row_axis
        col_axis = col_axis + ndim if col_axis < 0 else col_axis
        if ord is None or ord == "fro":
            return tf.sqrt(
                tf.reduce_sum(x * tf.math.conj(x), axis=axis, keepdims=keepdims)
            )
        elif ord == 1:
            # Max column sum. After reducing the row axis without keepdims,
            # a later column axis index shifts down by one.
            if not keepdims and col_axis > row_axis:
                col_axis -= 1
            x = tf.math.reduce_max(
                tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
                axis=col_axis,
                keepdims=keepdims,
            )
        elif ord == -1:
            # Min column sum.
            if not keepdims and col_axis > row_axis:
                col_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
                axis=col_axis,
                keepdims=keepdims,
            )
        elif ord == float("inf"):
            # Max row sum.
            if not keepdims and row_axis > col_axis:
                row_axis -= 1
            x = tf.math.reduce_max(
                tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
                axis=row_axis,
                keepdims=keepdims,
            )
        elif ord == float("-inf"):
            # Min row sum.
            if not keepdims and row_axis > col_axis:
                row_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
                axis=row_axis,
                keepdims=keepdims,
            )
        elif ord in ("nuc", 2, -2):
            # Spectral-type norms are computed from singular values; move
            # the reduction axes to the trailing positions first.
            x = moveaxis(x, axis, (-2, -1))
            if ord == -2:
                x = tf.math.reduce_min(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            elif ord == 2:
                x = tf.math.reduce_max(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            else:
                # "nuc": nuclear norm = sum of singular values.
                x = tf.math.reduce_sum(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            if keepdims:
                x = tf.expand_dims(x, axis[0])
                x = tf.expand_dims(x, axis[1])
        else:
            raise ValueError(
                f"Invalid `ord` argument for matrix norm. Received: ord={ord}"
            )
        return x
    else:
        raise ValueError(f"Invalid axis values. Received: axis={axis}")
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def qr(x, mode="reduced"):
    """QR decomposition in 'reduced' or 'complete' mode."""
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    # 'complete' corresponds to TF's full_matrices=True; 'reduced' to the
    # default full_matrices=False.
    full = mode == "complete"
    return tf.linalg.qr(x, full_matrices=full)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def solve(a, b):
    """Solve `a x = b`, accepting a vector-valued `b`."""
    # tensorflow.linalg.solve only supports same rank inputs, so a `b`
    # with one fewer dimension is expanded and squeezed back.
    if tf.rank(b) == tf.rank(a) - 1:
        rhs = tf.expand_dims(b, axis=-1)
        solution = tf.linalg.solve(a, rhs)
        return tf.squeeze(solution, axis=-1)
    return tf.linalg.solve(a, b)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def solve_triangular(a, b, lower=False):
    """Solve a triangular system `a x = b`, accepting a vector-valued `b`."""
    if b.shape.ndims == a.shape.ndims - 1:
        # Promote the right-hand side to a column, solve, then squeeze.
        rhs = tf.expand_dims(b, axis=-1)
        solution = tf.linalg.triangular_solve(a, rhs, lower=lower)
        return tf.squeeze(solution, axis=-1)
    return tf.linalg.triangular_solve(a, b, lower=lower)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def svd(x, full_matrices=True, compute_uv=True):
    """SVD returning (u, s, v^H) in NumPy convention, or `s` alone."""
    if compute_uv is False:
        return tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=False)
    # TF yields (s, u, v); reorder to NumPy's (u, s, v^H).
    sigma, left, right = tf.linalg.svd(
        x, full_matrices=full_matrices, compute_uv=compute_uv
    )
    return left, sigma, tf.linalg.adjoint(right)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def lstsq(a, b, rcond=None):
    """Least-squares solution of `a x = b` via the pseudo-inverse (SVD).

    Args:
        a: 2-D coefficient matrix of shape (m, n).
        b: right-hand side of shape (m,) or (m, k).
        rcond: singular-value cutoff relative to the largest singular
            value; defaults to `eps * max(n, m)`. Negative values are
            replaced by machine eps.

    Returns:
        x of shape (n,) or (n, k), matching `b`'s original ndim.

    Raises:
        ValueError: if leading dimensions of `a` and `b` differ.
        TypeError: if `a` is not 2-D or `b` is not 1-/2-D.
    """
    a = convert_to_tensor(a)
    b = convert_to_tensor(b)
    if a.shape[0] != b.shape[0]:
        raise ValueError("Leading dimensions of input arrays must match")
    b_orig_ndim = b.ndim
    # Work with a 2-D right-hand side internally.
    if b_orig_ndim == 1:
        b = b[:, None]
    if a.ndim != 2:
        raise TypeError(
            f"{a.ndim}-dimensional array given. "
            "Array must be two-dimensional"
        )
    if b.ndim != 2:
        raise TypeError(
            f"{b.ndim}-dimensional array given. "
            "Array must be one or two-dimensional"
        )
    m, n = a.shape
    dtype = a.dtype
    eps = tf.experimental.numpy.finfo(dtype).eps
    if a.shape == ():
        # NOTE(review): this branch looks unreachable — a scalar `a`
        # would already have raised in the `a.ndim != 2` check above.
        s = tf.zeros(0, dtype=a.dtype)
        x = tf.zeros((n, *b.shape[1:]), dtype=a.dtype)
    else:
        if rcond is None:
            rcond = eps * max(n, m)
        else:
            rcond = tf.where(rcond < 0, eps, rcond)
        u, s, vt = svd(a, full_matrices=False)
        # Zero out (via the mask) singular values below the cutoff;
        # `safe_s` avoids division by those small values.
        mask = s >= tf.convert_to_tensor(rcond, dtype=s.dtype) * s[0]
        safe_s = tf.cast(tf.where(mask, s, 1), dtype=a.dtype)
        s_inv = tf.where(mask, 1 / safe_s, 0)[:, tf.newaxis]
        # x = V * S^+ * U^H * b
        u_t_b = tf.matmul(tf.transpose(tf.math.conj(u)), b)
        x = tf.matmul(tf.transpose(tf.math.conj(vt)), s_inv * u_t_b)

    # Restore the original 1-D shape of `b` if needed.
    if b_orig_ndim == 1:
        x = tf.reshape(x, [-1])
    return x
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/math.py
ADDED
|
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tensorflow as tf
|
| 2 |
+
|
| 3 |
+
from keras.src.backend import config
|
| 4 |
+
from keras.src.backend import standardize_dtype
|
| 5 |
+
from keras.src.backend.common import dtypes
|
| 6 |
+
from keras.src.backend.tensorflow.core import cast
|
| 7 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Sum `data` entries sharing the same segment id.

    Args:
        data: tensor of values to aggregate.
        segment_ids: integer tensor of segment indices.
        num_segments: number of output segments; only valid when
            `sorted=False`. When omitted in the unsorted case, it is
            inferred from the unique ids present.
        sorted: whether `segment_ids` is sorted (uses the faster
            `tf.math.segment_sum`).

    Returns:
        Tensor of per-segment sums.

    Raises:
        ValueError: if `num_segments` is given together with `sorted=True`.
    """
    if sorted:
        if num_segments is not None:
            # Fix: the original message ran "backend." and "Received:"
            # together with no separating space.
            raise ValueError(
                "Argument `num_segments` cannot be set when sorted is True "
                "when using the tensorflow backend. "
                f"Received: num_segments={num_segments}, sorted={sorted}."
            )
        return tf.math.segment_sum(data, segment_ids)
    else:
        if num_segments is None:
            unique_segment_ids, _ = tf.unique(segment_ids)
            num_segments = tf.shape(unique_segment_ids)[0]
        return tf.math.unsorted_segment_sum(data, segment_ids, num_segments)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Take the max of `data` entries sharing the same segment id.

    Args:
        data: tensor of values to aggregate.
        segment_ids: integer tensor of segment indices.
        num_segments: number of output segments; only valid when
            `sorted=False`. When omitted in the unsorted case, it is
            inferred from the unique ids present.
        sorted: whether `segment_ids` is sorted (uses the faster
            `tf.math.segment_max`).

    Returns:
        Tensor of per-segment maxima.

    Raises:
        ValueError: if `num_segments` is given together with `sorted=True`.
    """
    if sorted:
        if num_segments is not None:
            # Fix: the original message ran "backend." and "Received:"
            # together with no separating space.
            raise ValueError(
                "Argument `num_segments` cannot be set when sorted is True "
                "when using the tensorflow backend. "
                f"Received: num_segments={num_segments}, sorted={sorted}."
            )
        return tf.math.segment_max(data, segment_ids)
    else:
        if num_segments is None:
            unique_segment_ids, _ = tf.unique(segment_ids)
            num_segments = tf.shape(unique_segment_ids)[0]
        return tf.math.unsorted_segment_max(data, segment_ids, num_segments)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def top_k(x, k, sorted=True):
    """Top-k values and indices along the last axis."""
    return tf.math.top_k(x, k, sorted=sorted)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def in_top_k(targets, predictions, k):
    """Whether each target is among the top-k predictions."""
    return tf.math.in_top_k(targets, predictions, k)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def logsumexp(x, axis=None, keepdims=False):
    """Numerically stable log(sum(exp(x))) reduction."""
    return tf.math.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def qr(x, mode="reduced"):
    """QR decomposition in 'reduced' or 'complete' mode."""
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    # 'complete' corresponds to TF's full_matrices=True; 'reduced' to the
    # default full_matrices=False.
    full = mode == "complete"
    return tf.linalg.qr(x, full_matrices=full)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def extract_sequences(x, sequence_length, sequence_stride):
    """Slice `x` into overlapping frames along its last axis.

    Frames of length `sequence_length` are taken every `sequence_stride`
    samples; the trailing remainder is dropped (no end padding).
    """
    return tf.signal.frame(
        x,
        frame_length=sequence_length,
        frame_step=sequence_stride,
        axis=-1,
        pad_end=False,
    )
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _get_complex_tensor_from_tuple(x):
    """Build a complex tensor from a (real, imag) tuple of float tensors.

    Args:
        x: tuple/list of exactly two tensors (real part, imaginary part)
            with identical shapes and floating dtypes.

    Returns:
        A complex tensor combining the two parts.

    Raises:
        ValueError: if `x` is not a 2-tuple, the shapes differ, or either
            part is not floating point.
    """
    if not isinstance(x, (tuple, list)) or len(x) != 2:
        # Fix: the original message ran two sentences together without a
        # separating space ("imaginary.Received").
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary. "
            f"Received: x={x}"
        )
    # `convert_to_tensor` does not support passing complex tensors. We separate
    # the input out into real and imaginary and convert them separately.
    real, imag = x
    real = convert_to_tensor(real)
    imag = convert_to_tensor(imag)
    # Check shapes.
    if real.shape != imag.shape:
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary. "
            "Both the real and imaginary parts should have the same shape. "
            f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
        )
    # Ensure dtype is float.
    if not real.dtype.is_floating or not imag.dtype.is_floating:
        raise ValueError(
            "At least one tensor in input `x` is not of type float. "
            f"Received: x={x}."
        )
    complex_input = tf.dtypes.complex(real, imag)
    return complex_input
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def fft(x):
    """1-D FFT of a (real, imag) tuple; returns (real, imag) parts."""
    signal = _get_complex_tensor_from_tuple(x)
    transformed = tf.signal.fft(signal)
    return tf.math.real(transformed), tf.math.imag(transformed)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def fft2(x):
    """2-D FFT of a (real, imag) tuple; returns (real, imag) parts."""
    signal = _get_complex_tensor_from_tuple(x)
    transformed = tf.signal.fft2d(signal)
    return tf.math.real(transformed), tf.math.imag(transformed)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def ifft2(x):
    """Inverse 2-D FFT of a (real, imag) tuple.

    Uses the conjugation identity ifft(z) = conj(fft(conj(z))) / N,
    with N = h * w the number of samples over the last two axes.
    """
    real, imag = x
    h = cast(tf.shape(real)[-2], "float32")
    w = cast(tf.shape(real)[-1], "float32")
    # Conjugate the input, forward-transform, then conjugate and scale.
    real_conj, imag_conj = real, -imag
    fft_real, fft_imag = fft2((real_conj, imag_conj))
    return fft_real / (h * w), -fft_imag / (h * w)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def rfft(x, fft_length=None):
    """FFT of a real signal; returns (real, imag) of the half spectrum."""
    # tf.signal.rfft expects fft_length as a length-1 list.
    if fft_length is not None:
        fft_length = [fft_length]
    spectrum = tf.signal.rfft(x, fft_length=fft_length)
    return tf.math.real(spectrum), tf.math.imag(spectrum)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def irfft(x, fft_length=None):
    """Inverse FFT of a (real, imag) half spectrum; returns a real signal."""
    spectrum = _get_complex_tensor_from_tuple(x)
    # tf.signal.irfft expects fft_length as a length-1 list.
    if fft_length is not None:
        fft_length = [fft_length]
    return tf.signal.irfft(spectrum, fft_length)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-time Fourier transform; returns (real, imag) parts.

    Args:
        x: float32/float64 signal tensor.
        sequence_length: window length in samples.
        sequence_stride: hop between consecutive frames.
        fft_length: FFT size; must be >= `sequence_length`.
        window: "hann", "hamming", a window tensor of shape
            [sequence_length], or None for no windowing.
        center: if True, reflect-pad the signal by fft_length // 2 on both
            ends so frames are centered on their timestamps.

    Returns:
        Tuple of (real, imag) tensors of the STFT.

    Raises:
        TypeError: for non-float input dtypes.
        ValueError: for fft_length < sequence_length, an unknown window
            name, or a window of the wrong shape.
    """
    if standardize_dtype(x.dtype) not in {"float32", "float64"}:
        raise TypeError(
            "Invalid input type. Expected `float32` or `float64`. "
            f"Received: input type={x.dtype}"
        )
    if fft_length < sequence_length:
        raise ValueError(
            "`fft_length` must equal or larger than `sequence_length`. "
            f"Received: sequence_length={sequence_length}, "
            f"fft_length={fft_length}"
        )
    if isinstance(window, str):
        if window not in {"hann", "hamming"}:
            raise ValueError(
                "If a string is passed to `window`, it must be one of "
                f'`"hann"`, `"hamming"`. Received: window={window}'
            )
    x = convert_to_tensor(x)

    if center:
        # Reflect-pad only the last (time) axis.
        pad_width = [(0, 0) for _ in range(len(x.shape))]
        pad_width[-1] = (fft_length // 2, fft_length // 2)
        x = tf.pad(x, pad_width, mode="reflect")

    # Split the fft_length - sequence_length slack symmetrically so the
    # window sits centered inside each fft_length-sized frame.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad

    if window is not None:
        if isinstance(window, str):
            if window == "hann":
                win_array = tf.signal.hann_window(
                    sequence_length, periodic=True, dtype=x.dtype
                )
            else:
                win_array = tf.signal.hamming_window(
                    sequence_length, periodic=True, dtype=x.dtype
                )
        else:
            win_array = convert_to_tensor(window, dtype=x.dtype)
        if len(win_array.shape) != 1 or win_array.shape[-1] != sequence_length:
            raise ValueError(
                "The shape of `window` must be equal to [sequence_length]."
                f"Received: window shape={win_array.shape}"
            )
        # Zero-pad the window out to fft_length.
        win_array = tf.pad(win_array, [[l_pad, r_pad]])

        # tf.signal.stft expects a window *function*; close over the
        # precomputed array (its frame_step/dtype args are ignored).
        def win(frame_step, dtype):
            return win_array

    else:
        win = None

    result = tf.signal.stft(
        x,
        frame_length=(sequence_length + l_pad + r_pad),
        frame_step=sequence_stride,
        fft_length=fft_length,
        window_fn=win,
    )
    return tf.math.real(result), tf.math.imag(result)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse short-time Fourier transform.

    Args:
        x: Tuple of `(real, imag)` tensors representing the STFT output.
        sequence_length: Size of each analysis window, in samples.
        sequence_stride: Hop length between consecutive frames.
        fft_length: Size of the FFT that produced `x`.
        length: Optional exact length of the reconstructed signal; when
            given, the output is sliced to this many samples.
        window: `"hann"`, `"hamming"`, a 1-D tensor of length
            `sequence_length`, or `None` for a rectangular window.
        center: Whether the forward transform padded the signal by
            `fft_length // 2` on each side; the same amount is trimmed here.

    Returns:
        The reconstructed real-valued signal tensor.

    Raises:
        ValueError: If a tensor `window` is not 1-D of length
            `sequence_length`.
    """
    complex_input = _get_complex_tensor_from_tuple(x)
    # Real component's dtype drives the window dtype.
    dtype = tf.math.real(complex_input).dtype

    expected_output_len = fft_length + sequence_stride * (
        tf.shape(complex_input)[-2] - 1
    )
    # The forward stft centers a length-`sequence_length` window inside a
    # length-`fft_length` frame; mirror that padding on the window here.
    l_pad = (fft_length - sequence_length) // 2
    r_pad = fft_length - sequence_length - l_pad

    if window is not None:
        if isinstance(window, str):
            if window == "hann":
                win_array = tf.signal.hann_window(
                    sequence_length, periodic=True, dtype=dtype
                )
            else:
                win_array = tf.signal.hamming_window(
                    sequence_length, periodic=True, dtype=dtype
                )
        else:
            win_array = convert_to_tensor(window, dtype=dtype)
        if len(win_array.shape) != 1 or win_array.shape[-1] != sequence_length:
            raise ValueError(
                "The shape of `window` must be equal to [sequence_length]."
                f"Received: window shape={win_array.shape}"
            )
        win_array = tf.pad(win_array, [[l_pad, r_pad]])
        # `inverse_stft_window_fn` rescales the window so overlap-add
        # reconstructs the signal for the given hop size.
        win = tf.signal.inverse_stft_window_fn(
            sequence_stride, lambda frame_step, dtype: win_array
        )
    else:
        win = None

    x = tf.signal.inverse_stft(
        complex_input,
        frame_length=(sequence_length + l_pad + r_pad),
        frame_step=sequence_stride,
        fft_length=fft_length,
        window_fn=win,
    )

    # Undo the `center` padding from the forward transform, then trim or
    # bound the signal to `length` / the expected output length.
    start = 0 if center is False else fft_length // 2
    if length is not None:
        end = start + length
    elif center is True:
        end = -(fft_length // 2)
    else:
        end = expected_output_len
    return x[..., start:end]
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def rsqrt(x):
    """Elementwise reciprocal square root, `1 / sqrt(x)`."""
    return tf.math.rsqrt(x)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def erf(x):
    """Elementwise Gauss error function of `x`."""
    return tf.math.erf(x)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def erfinv(x):
    """Elementwise inverse of the Gauss error function."""
    return tf.math.erfinv(x)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def solve(a, b):
    """Solve the linear system ``a @ out = b`` and return ``out``."""
    lhs = convert_to_tensor(a)
    rhs = convert_to_tensor(b)
    return tf.linalg.solve(lhs, rhs)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm, mirroring `jax.numpy.linalg.norm` semantics.

    Args:
        x: Input tensor.
        ord: Order of the norm. `None` means Euclidean for vectors and
            Frobenius for matrices. Supports `"euclidean"`, `"fro"`,
            `"nuc"`, `0`, `+-1`, `+-2`, `+-inf` depending on `axis` rank.
        axis: `None` (all axes), an int (vector norm), or a 2-tuple
            (matrix norm).
        keepdims: Whether reduced axes are kept with size 1.

    Returns:
        The norm as a floating-point tensor.

    Raises:
        ValueError: If `ord` is invalid for the given axis rank, or `axis`
            names more than two dimensions.
    """
    from keras.src.backend.tensorflow.numpy import moveaxis

    x = convert_to_tensor(x)
    x_shape = x.shape
    ndim = x_shape.rank

    if axis is None:
        axis = tuple(range(ndim))
    elif isinstance(axis, int):
        axis = (axis,)

    # A single-axis tuple collapses to an int (vector norm).
    axis = axis[0] if len(axis) == 1 else axis
    num_axes = 1 if isinstance(axis, int) else len(axis)

    if num_axes == 1 and ord is None:
        ord = "euclidean"
    elif num_axes == 2 and ord is None:
        ord = "fro"

    # Norms are computed in floating point; int64 maps to the configured
    # default float dtype.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)

    # Fast path to utilize `tf.linalg.norm`
    if (num_axes == 1 and ord in ("euclidean", 1, 2, float("inf"))) or (
        num_axes == 2 and ord in ("euclidean", "fro", 1, 2, float("inf"))
    ):
        return tf.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)

    # Ref: jax.numpy.linalg.norm
    if num_axes == 1 and ord not in ("fro", "nuc"):
        if ord == float("-inf"):
            # min |x_i|
            return tf.math.reduce_min(
                tf.math.abs(x), axis=axis, keepdims=keepdims
            )
        elif ord == 0:
            # Number of nonzero entries.
            return tf.math.reduce_sum(
                tf.cast(tf.not_equal(x, 0), dtype=x.dtype),
                axis=axis,
                keepdims=keepdims,
            )
        else:
            # General p-norm: (sum |x|^p)^(1/p).
            ord = convert_to_tensor(ord, dtype=x.dtype)
            out = tf.math.reduce_sum(
                tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims
            )
            return tf.pow(out, 1.0 / ord)
    elif num_axes == 2 and ord in ("nuc", float("-inf"), -2, -1):
        row_axis, col_axis = axis[0], axis[1]
        row_axis = row_axis + ndim if row_axis < 0 else row_axis
        col_axis = col_axis + ndim if col_axis < 0 else col_axis
        if ord == float("-inf"):
            # Smallest absolute row sum. After reducing the column axis
            # without keepdims, a later row axis shifts down by one.
            if not keepdims and row_axis > col_axis:
                row_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
                axis=row_axis,
                keepdims=keepdims,
            )
        elif ord == -1:
            # Smallest absolute column sum; same axis-shift caveat.
            if not keepdims and col_axis > row_axis:
                col_axis -= 1
            x = tf.math.reduce_min(
                tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
                axis=col_axis,
                keepdims=keepdims,
            )
        else:
            # -2 (smallest singular value) and "nuc" (sum of singular
            # values) both need the SVD on the trailing two axes.
            x = moveaxis(x, axis, (-2, -1))
            if ord == -2:
                x = tf.math.reduce_min(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            else:
                x = tf.math.reduce_sum(
                    tf.linalg.svd(x, compute_uv=False), axis=-1
                )
            if keepdims:
                x = tf.expand_dims(x, axis[0])
                x = tf.expand_dims(x, axis[1])
        return x

    if num_axes == 1:
        raise ValueError(
            f"Invalid `ord` argument for vector norm. Received: ord={ord}"
        )
    elif num_axes == 2:
        raise ValueError(
            f"Invalid `ord` argument for matrix norm. Received: ord={ord}"
        )
    else:
        raise ValueError(f"Invalid axis values. Received: axis={axis}")
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def logdet(x):
    """Natural log of the determinant of a (batch of) positive-definite
    matrix, via `tf.linalg.logdet`."""
    x = convert_to_tensor(x)
    return tf.linalg.logdet(x)
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/nn.py
ADDED
|
@@ -0,0 +1,1068 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import tensorflow as tf
|
| 5 |
+
|
| 6 |
+
from keras.src import backend
|
| 7 |
+
from keras.src.backend.common.backend_utils import (
|
| 8 |
+
compute_conv_transpose_output_shape,
|
| 9 |
+
)
|
| 10 |
+
from keras.src.backend.tensorflow.core import cast
|
| 11 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def relu(x):
    """Rectified linear unit: elementwise `max(x, 0)`."""
    return tf.nn.relu(x)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def relu6(x):
    """ReLU capped at 6: elementwise `min(max(x, 0), 6)`."""
    return tf.nn.relu6(x)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def sigmoid(x):
    """Elementwise logistic sigmoid.

    The pre-activation `x` is stashed on the output as `_keras_logits` so
    that cross-entropy losses can recover the raw logits for numerically
    stable computation.
    """
    logits = x
    output = tf.nn.sigmoid(x)
    output._keras_logits = logits
    return output
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def tanh(x):
    """Elementwise hyperbolic tangent."""
    return tf.nn.tanh(x)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def tanh_shrink(x):
    """Tanh-shrink activation: `x - tanh(x)`."""
    return x - tf.math.tanh(x)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def softplus(x):
    """Softplus activation: `log(1 + exp(x))`."""
    return tf.math.softplus(x)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def softsign(x):
    """Softsign activation: `x / (1 + |x|)`."""
    return tf.nn.softsign(x)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def soft_shrink(x, threshold=0.5):
    """Soft shrinkage: shift `|x|` toward zero by `threshold`, zeroing the
    band `[-threshold, threshold]`."""
    negative_branch = tf.where(x < -threshold, x + threshold, tf.zeros_like(x))
    return tf.where(x > threshold, x - threshold, negative_branch)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def sparse_plus(x):
    """Sparse-plus activation: 0 for `x <= -1`, the quadratic ramp
    `(x + 1)^2 / 4` on `(-1, 1)`, and identity for `x >= 1`."""
    ramp_or_identity = tf.where(x < 1, (1 / 4) * tf.pow(x + 1, 2), x)
    return tf.where(x <= -1, tf.zeros_like(x), ramp_or_identity)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def silu(x):
    """SiLU / swish activation: `x * sigmoid(x)`."""
    return tf.nn.silu(x)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def squareplus(x, b=4):
    """Squareplus activation: `(x + sqrt(x^2 + b)) / 2`, a smooth ReLU
    approximation whose sharpness is controlled by `b`."""
    x = convert_to_tensor(x)
    smoothness = convert_to_tensor(b, dtype=x.dtype)
    return (x + tf.sqrt(tf.square(x) + smoothness)) / 2
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def log_sigmoid(x):
    """Elementwise `log(sigmoid(x))`, computed stably."""
    return tf.math.log_sigmoid(x)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def leaky_relu(x, negative_slope=0.2):
    """Leaky ReLU: identity for positive `x`, `negative_slope * x`
    otherwise."""
    return tf.nn.leaky_relu(x, alpha=negative_slope)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: `relu6(x + 3) / 6`."""
    x = convert_to_tensor(x)
    shift = tf.constant(3.0, x.dtype)
    scale = tf.constant(6.0, x.dtype)
    return relu6(x + shift) / scale
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def hard_silu(x):
    """Hard SiLU: `x * hard_sigmoid(x)`."""
    return x * hard_sigmoid(x)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def elu(x, alpha=1.0):
    """Exponential linear unit, with the negative side scaled by
    `alpha`."""
    activated = tf.nn.elu(x)
    # tf.nn.elu has no alpha parameter; rescale the negative side manually
    # when alpha differs from 1.
    if alpha != 1:
        activated = tf.where(x > 0, activated, alpha * activated)
    return activated
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def selu(x):
    """Scaled exponential linear unit (self-normalizing ELU)."""
    return tf.nn.selu(x)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def gelu(x, approximate=True):
    """Gaussian error linear unit; `approximate=True` uses the tanh-based
    approximation."""
    x = convert_to_tensor(x)
    return tf.nn.gelu(x, approximate=approximate)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def celu(x, alpha=1.0):
    """CELU activation: `max(x, 0) + alpha * expm1(min(x, 0) / alpha)`."""
    positive_part = tf.maximum(x, 0.0)
    negative_part = alpha * tf.math.expm1(tf.minimum(x, 0.0) / alpha)
    return positive_part + negative_part
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def glu(x, axis=-1):
    """Gated linear unit: split `x` in two along `axis` and gate the first
    half with the sigmoid of the second."""
    if x.shape[axis] % 2 != 0:
        raise ValueError(
            "axis size must be divisible by 2. "
            f"Received: x.shape={x.shape} with axis={axis}"
        )
    value, gate = tf.split(x, num_or_size_splits=2, axis=axis)
    return value * tf.sigmoid(gate)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def hard_tanh(x):
    """Hard tanh: clip values to the range `[-1, 1]`."""
    return tf.clip_by_value(x, clip_value_min=-1.0, clip_value_max=1.0)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def hard_shrink(x, threshold=0.5):
    """Hard shrinkage: keep `x` where `|x| > threshold`, else zero."""
    return tf.where(tf.abs(x) > threshold, x, tf.zeros_like(x))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def threshold(x, threshold, default_value):
    """Pass `x` through where it exceeds `threshold`, else emit
    `default_value`. (The parameter deliberately shadows the function
    name to match the public API.)"""
    return tf.where(x > threshold, x, default_value)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def softmax(x, axis=-1):
    """Softmax activation along `axis` (`None` reduces over all dims).

    The raw logits are stashed on the output as `_keras_logits` so losses
    can recover them for numerically stable cross-entropy.
    """
    if axis is None:
        # Unlike numpy, tf will handle axis=None as axis=-1, so flatten
        # first to reduce over every dimension, then restore the shape.
        flat = tf.reshape(x, [-1])
        result = tf.reshape(tf.nn.softmax(flat, axis=-1), tf.shape(x))
    else:
        result = tf.nn.softmax(x, axis=axis)
    result._keras_logits = x
    return result
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def log_softmax(x, axis=-1):
    """Log-softmax along `axis`; `None` reduces over all dimensions."""
    if axis is not None:
        return tf.nn.log_softmax(x, axis=axis)
    # Unlike numpy, tf treats axis=None as axis=-1, so flatten first to
    # normalize over every dimension, then restore the original shape.
    flat = tf.reshape(x, [-1])
    return tf.reshape(tf.nn.log_softmax(flat, axis=-1), tf.shape(x))
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def sparsemax(logits, axis=-1):
    """Sparsemax activation (Martins & Astudillo, 2016): a projection onto
    the probability simplex that can produce exact zeros.

    Args:
        logits: Input tensor.
        axis: Axis along which to normalize.

    Returns:
        A tensor of the same shape whose slices along `axis` sum to 1 and
        may contain exact zeros.
    """
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(logits)
    logits_sorted = tf.sort(logits, direction="DESCENDING", axis=axis)
    logits_cumsum = tf.cumsum(logits_sorted, axis=axis)
    r = tf.range(1, tf.shape(logits)[axis] + 1, dtype=logits.dtype)
    r_shape = [1] * len(logits.shape)
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = tf.reshape(r, r_shape)  # Reshape for broadcasting
    # Support set: entries whose sorted logit stays above the running
    # threshold (z_k > (cumsum_k - 1) / k).
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold
    logits_cumsum_safe = tf.where(support, logits_cumsum, 0.0)
    k = tf.reduce_sum(tf.cast(support, logits.dtype), axis=axis, keepdims=True)
    tau = (tf.reduce_sum(logits_cumsum_safe, axis=axis, keepdims=True) - 1) / k
    output = tf.maximum(logits - tau, 0.0)
    return output
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _transpose_spatial_inputs(inputs):
    """Move the channel axis (position 1) to the end, converting
    channels_first pooling inputs to the channels_last layout TF
    requires."""
    rank = len(inputs.shape)
    if rank == 3:
        perm = (0, 2, 1)
    elif rank == 4:
        perm = (0, 2, 3, 1)
    elif rank == 5:
        perm = (0, 2, 3, 4, 1)
    else:
        raise ValueError(
            "Pooling inputs's shape must be 3, 4 or 5, corresponding to 1D, 2D "
            f"and 3D inputs. But received shape: {inputs.shape}."
        )
    return tf.transpose(inputs, perm)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _transpose_spatial_outputs(outputs):
    """Undo `_transpose_spatial_inputs`: move the trailing channel axis
    back to position 1. Unexpected ranks pass through unchanged."""
    perms = {3: (0, 2, 1), 4: (0, 3, 1, 2), 5: (0, 4, 1, 2, 3)}
    rank = len(outputs.shape)
    if rank in perms:
        outputs = tf.transpose(outputs, perms[rank])
    return outputs
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling for 1D/2D/3D inputs in either data format.

    TF pooling only supports channels_last, so channels_first inputs are
    transposed in and the result transposed back.
    """
    data_format = backend.standardize_data_format(data_format)
    if strides is None:
        strides = pool_size
    tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
    needs_transpose = data_format == "channels_first"
    if needs_transpose:
        inputs = _transpose_spatial_inputs(inputs)

    pooled = tf.nn.max_pool(
        inputs,
        pool_size,
        strides,
        padding.upper(),
        tf_data_format,
    )
    if needs_transpose:
        pooled = _transpose_spatial_outputs(pooled)
    return pooled
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Average pooling for 1D/2D/3D inputs in either data format.

    TF pooling only supports channels_last, so channels_first inputs are
    transposed in and the result transposed back.
    """
    data_format = backend.standardize_data_format(data_format)
    if strides is None:
        strides = pool_size
    tf_data_format = _convert_data_format("channels_last", len(inputs.shape))
    needs_transpose = data_format == "channels_first"
    if needs_transpose:
        inputs = _transpose_spatial_inputs(inputs)

    pooled = tf.nn.avg_pool(
        inputs,
        pool_size,
        strides,
        padding.upper(),
        tf_data_format,
    )
    if needs_transpose:
        pooled = _transpose_spatial_outputs(pooled)
    return pooled
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _convert_data_format(data_format, ndim):
|
| 263 |
+
if data_format == "channels_last":
|
| 264 |
+
if ndim == 3:
|
| 265 |
+
return "NWC"
|
| 266 |
+
elif ndim == 4:
|
| 267 |
+
return "NHWC"
|
| 268 |
+
elif ndim == 5:
|
| 269 |
+
return "NDHWC"
|
| 270 |
+
else:
|
| 271 |
+
raise ValueError(
|
| 272 |
+
f"Input rank not supported: {ndim}. "
|
| 273 |
+
"Expected values are [3, 4, 5]"
|
| 274 |
+
)
|
| 275 |
+
elif data_format == "channels_first":
|
| 276 |
+
if ndim == 3:
|
| 277 |
+
return "NCW"
|
| 278 |
+
elif ndim == 4:
|
| 279 |
+
return "NCHW"
|
| 280 |
+
elif ndim == 5:
|
| 281 |
+
return "NCDHW"
|
| 282 |
+
else:
|
| 283 |
+
raise ValueError(
|
| 284 |
+
f"Input rank not supported: {ndim}. "
|
| 285 |
+
"Expected values are [3, 4, 5]"
|
| 286 |
+
)
|
| 287 |
+
else:
|
| 288 |
+
raise ValueError(
|
| 289 |
+
f"Invalid data_format: {data_format}. "
|
| 290 |
+
'Expected values are ["channels_first", "channels_last"]'
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution via `tf.nn.convolution`, with an
    XLA-compiled fallback for cases that are broken in TF on CPU.

    Args:
        inputs: Input tensor of rank 3, 4, or 5.
        kernel: Convolution kernel tensor.
        strides: Int or per-spatial-dim tuple of strides.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None` for
            the backend default.
        dilation_rate: Int or per-spatial-dim tuple of dilations.

    Returns:
        The convolution output tensor.
    """
    def _conv():
        tf_data_format = _convert_data_format(data_format, len(inputs.shape))
        return tf.nn.convolution(
            inputs,
            kernel,
            strides,
            padding.upper(),
            data_format=tf_data_format,
            dilations=dilation_rate,
        )

    # Certain ops are broken in Tensorflow on CPU only.
    # We can work around by compiling the op with XLA.
    @tf.function(jit_compile=True)
    def _conv_xla():
        return _conv()

    # Channels first "NCDHW" (3d convolutions) are broken on CPU without XLA.
    needs_xla = data_format == "channels_first" and len(inputs.shape) == 5
    # grouped convolutions are broken on CPU without XLA.
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        channels = inputs.shape[-1]
    else:
        channels = inputs.shape[1]
    # Input channels differing from the kernel's in-channel dimension
    # indicates a grouped convolution.
    needs_xla = needs_xla or channels != kernel.shape[-2]
    if needs_xla:
        return _conv_xla()
    else:
        return _conv()
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise convolution for 1D or 2D inputs.

    1D inputs are lifted to 2D with a dummy spatial dimension, run through
    `tf.nn.depthwise_conv2d` (TF has no dedicated 1D depthwise op), then
    squeezed back.

    Args:
        inputs: Rank-3 (1D) or rank-4 (2D) input tensor.
        kernel: Depthwise kernel tensor.
        strides: Int or per-spatial-dim tuple of strides.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None` for
            the backend default.
        dilation_rate: Int or per-spatial-dim tuple of dilations.

    Returns:
        The convolved output tensor.

    Raises:
        ValueError: If `inputs` has more than 2 spatial dimensions.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = len(inputs.shape) - 2
    if num_spatial_dims > 2:
        raise ValueError(
            "`inputs` rank must be 3 (1D conv) or 4 (2D conv). Received: "
            # Bug fix: this literal was missing the `f` prefix, so users
            # saw the placeholder text "{inputs.ndim}" verbatim.
            f"{inputs.ndim}."
        )
    # Because we use `tf.nn.depthwise_conv2d` for both 1D and 2D convs, we set
    # `tf_data_format` using 2D conv format.
    tf_data_format = _convert_data_format(data_format, 4)
    padding = padding.upper()
    if isinstance(strides, int):
        strides = (strides,) * num_spatial_dims
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * num_spatial_dims
    if num_spatial_dims == 1:
        # 1D depthwise conv: insert a dummy spatial dimension of size 1.
        if data_format == "channels_last":
            strides = (1,) + strides * 2 + (1,)
            spatial_start_dim = 1
        else:
            strides = (1, 1) + strides * 2
            spatial_start_dim = 2
        inputs = tf.expand_dims(inputs, spatial_start_dim)
        kernel = tf.expand_dims(kernel, axis=0)

        dilation_rate = None if dilation_rate is None else (1,) + dilation_rate

        outputs = tf.nn.depthwise_conv2d(
            inputs,
            kernel,
            strides,
            padding,
            data_format=tf_data_format,
            dilations=dilation_rate,
        )
        # Remove the dummy spatial dimension again.
        return tf.squeeze(outputs, [spatial_start_dim])

    if data_format == "channels_last":
        strides = (1,) + strides + (1,)
        spatial_start_dim = 1
    else:
        strides = (1, 1) + strides
        spatial_start_dim = 2
    return tf.nn.depthwise_conv2d(
        inputs,
        kernel,
        strides,
        padding,
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable (depthwise + pointwise) convolution for 1D or 2D inputs.

    1D inputs are lifted to 2D, run through `tf.nn.separable_conv2d`, and
    squeezed back, since TF has no dedicated 1D separable op.

    Args:
        inputs: Rank-3 (1D) or rank-4 (2D) input tensor.
        depthwise_kernel: Per-channel spatial kernel.
        pointwise_kernel: 1x1 mixing kernel applied after the depthwise
            step.
        strides: Int or per-spatial-dim tuple of strides.
        padding: `"valid"` or `"same"` (case-insensitive).
        data_format: `"channels_first"`, `"channels_last"`, or `None` for
            the backend default.
        dilation_rate: Int or per-spatial-dim tuple of dilations.

    Returns:
        The convolved output tensor.

    Raises:
        ValueError: If `inputs` has more than 2 spatial dimensions.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = len(inputs.shape) - 2
    if num_spatial_dims > 2:
        raise ValueError(
            "`num_spatial_dims` must be 1 or 2. Received: "
            f"num_spatial_dims={num_spatial_dims}."
        )
    # Because we use `tf.nn.separable_conv2d` for both 1D and 2D convs, we set
    # `tf_data_format` using 2D conv format.
    tf_data_format = _convert_data_format(data_format, 4)
    padding = padding.upper()
    if isinstance(strides, int):
        strides = (strides,) * num_spatial_dims
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * num_spatial_dims
    if num_spatial_dims == 1:
        # 1D depthwise conv: insert a dummy spatial dimension of size 1.
        if data_format == "channels_last":
            strides = (1,) + strides * 2 + (1,)
            spatial_start_dim = 1
        else:
            strides = (1, 1) + strides * 2
            spatial_start_dim = 2
        inputs = tf.expand_dims(inputs, spatial_start_dim)
        depthwise_kernel = tf.expand_dims(depthwise_kernel, axis=0)
        pointwise_kernel = tf.expand_dims(pointwise_kernel, axis=0)
        dilation_rate = None if dilation_rate is None else (1,) + dilation_rate

        outputs = tf.nn.separable_conv2d(
            inputs,
            depthwise_kernel,
            pointwise_kernel,
            strides,
            padding,
            data_format=tf_data_format,
            dilations=dilation_rate,
        )
        # Remove the dummy spatial dimension again.
        return tf.squeeze(outputs, [spatial_start_dim])

    if data_format == "channels_last":
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides
    return tf.nn.separable_conv2d(
        inputs,
        depthwise_kernel,
        pointwise_kernel,
        strides,
        padding,
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """Transposed (fractionally strided) convolution.

    Args:
        inputs: Input tensor of rank 3, 4, or 5.
        kernel: Kernel tensor; the second-to-last dimension is the output
            filter count.
        strides: Int or per-spatial-dim tuple of strides.
        padding: `"valid"` or `"same"` (case-insensitive).
        output_padding: Optional extra padding resolving output-size
            ambiguity.
        data_format: `"channels_first"`, `"channels_last"`, or `None` for
            the backend default.
        dilation_rate: Int or per-spatial-dim tuple of dilations.

    Returns:
        The transposed-convolution output tensor.
    """
    data_format = backend.standardize_data_format(data_format)
    tf_data_format = _convert_data_format(data_format, len(inputs.shape))
    kernel_size = kernel.shape[:-2]
    filters = kernel.shape[-2]
    input_shape = list(inputs.shape)
    symbolic_shape = tf.shape(inputs)
    # Replace unknown (None) static dims with their dynamic counterparts
    # so the output shape can always be computed.
    for i, e in enumerate(input_shape):
        if e is None:
            input_shape[i] = symbolic_shape[i]
    output_shape = compute_conv_transpose_output_shape(
        input_shape,
        kernel_size,
        filters,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    )

    return tf.nn.conv_transpose(
        inputs,
        kernel,
        output_shape,
        strides,
        padding=padding.upper(),
        data_format=tf_data_format,
        dilations=dilation_rate,
    )
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
def one_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """One-hot encode integer indices.

    Args:
        x: Integer tensor of indices; negative indices produce all-zero
            rows.
        num_classes: Size of the one-hot dimension.
        axis: Where to insert the one-hot dimension.
        dtype: Output dtype; `None` falls back to `"float32"`.
        sparse: If True, return a `tf.SparseTensor` instead of dense.

    Returns:
        A dense tensor or `tf.SparseTensor` with an extra `num_classes`
        dimension at `axis`.
    """
    x = convert_to_tensor(x, dtype="int64")
    if dtype is None:
        dtype = "float32"
    else:
        dtype = backend.standardize_dtype(dtype)
    if sparse:
        # We don't use `tf.sparse.bincount`, it doesn't handle negative indices
        # and only support rank 1 and 2 tensors (`one_hot` adds a dimension).
        if axis < 0:
            axis = axis + len(x.shape) + 1
        values_count = math.prod(x.shape)
        values = tf.reshape(x, (values_count,))
        # We deal with negative inputs by having zeros in the output although
        # it's useless. It makes shapes static.
        values = tf.cast(tf.greater_equal(values, 0), dtype=dtype)
        indices = [tf.range(dim) for dim in x.shape]
        indices = tf.meshgrid(*indices, indexing="ij")
        indices.insert(axis, tf.maximum(x, 0))  # Deal with negative indices
        indices = [tf.reshape(a, (values_count, 1)) for a in indices]
        indices = [tf.cast(a, tf.int64) for a in indices]
        indices = tf.concat(indices, axis=1)
        shape = list(x.shape)
        shape.insert(axis, num_classes)
        return tf.SparseTensor(indices, values, shape)
    # Booleans need explicit on/off values; other dtypes use tf defaults.
    on_value, off_value = (True, False) if dtype == "bool" else (None, None)
    return tf.one_hot(
        x,
        num_classes,
        on_value=on_value,
        off_value=off_value,
        axis=axis,
        dtype=dtype,
    )
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def multi_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """Encode integer labels as a multi-hot tensor.

    Each sample's labels are one-hot encoded and then reduced over the
    per-sample label dimension, marking every class present. Supports
    sparse output and boolean dtypes.
    """
    # Reduce over the per-sample label axis (axis 0 for rank-1 input).
    reduce_axis = 1 if len(x.shape) > 1 else 0
    is_bool = backend.standardize_dtype(dtype) == "bool"

    if is_bool and sparse:
        # `tf.sparse.reduce_max` doesn't work on bool and there is no
        # `tf.sparse.reduce_any`, so reduce in int8 and cast afterwards.
        encoded = one_hot(
            x, num_classes, axis=axis, dtype="int8", sparse=True
        )
        reduced = tf.sparse.reduce_max(
            encoded, axis=reduce_axis, output_is_sparse=True
        )
        static_shape = reduced.shape
        reduced = tf.cast(reduced, dtype)
        # Casting loses the static shape; restore it.
        reduced.set_shape(static_shape)
        return reduced
    if is_bool:
        encoded = one_hot(x, num_classes, axis=axis, dtype=dtype)
        return tf.reduce_any(encoded, axis=reduce_axis)
    if sparse:
        # We don't use `tf.sparse.bincount`, it doesn't handle negative
        # indices and has a rank limitation.
        encoded = one_hot(
            x, num_classes, axis=axis, dtype=dtype, sparse=True
        )
        return tf.sparse.reduce_max(
            encoded, axis=reduce_axis, output_is_sparse=True
        )
    encoded = one_hot(x, num_classes, axis=axis, dtype=dtype)
    return tf.reduce_max(encoded, axis=reduce_axis)
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def _get_logits(output, from_logits, op_type, fn_name):
    """Retrieves logits tensor from maybe-softmax or maybe-sigmoid tensor."""
    # Default: trust the caller's `from_logits` claim as-is.
    output_ = output
    from_logits_ = from_logits

    # Keras activation layers stash the pre-activation tensor on
    # `_keras_logits`; prefer it for numerical stability.
    has_keras_logits = hasattr(output, "_keras_logits")
    if has_keras_logits:
        output_ = output._keras_logits
        from_logits_ = True

    # Graph-mode fallback: if `output` is the direct result of the expected
    # activation op (e.g. "Softmax"), its input is the logits tensor.
    # Eager tensors and variables carry no graph op, so they are excluded.
    from_expected_op_type = (
        hasattr(output, "op")
        and not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))
        and output.op.type == op_type
    ) and not has_keras_logits

    if from_expected_op_type:
        # When softmax activation function is used for output operation, we
        # use logits from the softmax function directly to compute loss in order
        # to prevent collapsing zero when training.
        assert len(output.op.inputs) == 1
        output_ = output.op.inputs[0]
        from_logits_ = True

    # The caller claimed logits but we detected an activation output —
    # `from_logits=True` was probably passed by mistake.
    if from_logits and (has_keras_logits or from_expected_op_type):
        warnings.warn(
            f'"`{fn_name}` received `from_logits=True`, but '
            f"the `output` argument was produced by a {op_type} "
            "activation and thus does not represent logits. "
            "Was this intended?",
            stacklevel=2,
        )
    return output_, from_logits_
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy between an output tensor and a target tensor.

    Args:
        target: A tensor of the same shape as `output`.
        output: A tensor resulting from a softmax
            (unless `from_logits` is `True`, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to data
            format `channels_last`, and `axis=1` corresponds to data format
            `channels_first`.

    Returns:
        Output tensor.

    Example:

    >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
    >>> print(a)
    tf.Tensor(
      [[1. 0. 0.]
       [0. 1. 0.]
       [0. 0. 1.]], shape=(3, 3), dtype=float32)
    >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94],
    ...                 shape=[3, 3])
    >>> print(b)
    tf.Tensor(
      [[0.9  0.05 0.05]
       [0.05 0.89 0.06]
       [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
    >>> loss = categorical_crossentropy(a, b)
    >>> print(np.around(loss, 5))
    [0.10536 0.11653 0.06188]
    >>> loss = categorical_crossentropy(a, a)
    >>> print(np.around(loss, 5))
    [0. 0. 0.]
    """
    target = tf.convert_to_tensor(target)
    output = tf.convert_to_tensor(output)

    # Static shape validation: ranks must match and every statically-known
    # dimension must agree (None dims are allowed to pass).
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) != len(output.shape):
        raise ValueError(
            "Arguments `target` and `output` must have the same rank "
            "(ndim). Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    for e1, e2 in zip(target.shape, output.shape):
        if e1 is not None and e2 is not None and e1 != e2:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape. "
                "Received: "
                f"target.shape={target.shape}, output.shape={output.shape}"
            )

    # Recover logits when `output` came from a softmax activation, so the
    # fused, numerically stable crossentropy op can be used.
    output, from_logits = _get_logits(
        output, from_logits, "Softmax", "categorical_crossentropy"
    )
    if from_logits:
        return tf.nn.softmax_cross_entropy_with_logits(
            labels=target, logits=output, axis=axis
        )

    # Adjust the predictions so that the probability of
    # each class for every sample adds up to 1
    # This is needed to ensure that the cross entropy is
    # computed correctly.
    output = output / tf.reduce_sum(output, axis, keepdims=True)

    # Compute cross entropy from probabilities.
    output = tf.clip_by_value(
        output, backend.epsilon(), 1.0 - backend.epsilon()
    )
    return -tf.reduce_sum(target * tf.math.log(output), axis)
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy with integer targets.

    Args:
        target: An integer tensor.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to data
            format `channels_last`, and `axis=1` corresponds to data format
            `channels_first`.

    Returns:
        Output tensor.
    """
    # The fused TF op only supports class probabilities on the last axis.
    if axis != -1 and axis != len(output.shape) - 1:
        raise ValueError(
            f"Only axis=-1 is currently supported. Received: axis={axis}"
        )
    output, from_logits = _get_logits(
        output, from_logits, "Softmax", "sparse_categorical_crossentropy"
    )

    target = tf.convert_to_tensor(target)
    target = tf.cast(target, dtype="int64")
    output = tf.convert_to_tensor(output)
    # Drop a trailing singleton label dim, e.g. (batch, 1) -> (batch,).
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = tf.squeeze(target, axis=-1)

    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    if len(target.shape) != len(output.shape[:-1]):
        raise ValueError(
            "Argument `output` must have rank (ndim) `target.ndim - 1`. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    for e1, e2 in zip(target.shape, output.shape[:-1]):
        if e1 is not None and e2 is not None and e1 != e2:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape "
                "up until the last dimension: "
                f"target.shape={target.shape}, output.shape={output.shape}"
            )

    # The fused op expects logits; convert probabilities to log-space first,
    # clipping away from 0 so the log stays finite.
    if not from_logits:
        output = tf.clip_by_value(
            output, backend.epsilon(), 1 - backend.epsilon()
        )
        output = tf.math.log(output)

    result = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target, logits=output
    )
    return result
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
def binary_crossentropy(target, output, from_logits=False):
    """Binary crossentropy between an output tensor and a target tensor.

    Args:
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor.
            By default, we consider that `output`
            encodes a probability distribution.

    Returns:
        A tensor.
    """
    target = tf.convert_to_tensor(target)
    output = tf.convert_to_tensor(output)

    # Ranks must match and every statically-known dimension must agree.
    if len(target.shape) != len(output.shape):
        raise ValueError(
            "Arguments `target` and `output` must have the same rank "
            "(ndim). Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    for dim_t, dim_o in zip(target.shape, output.shape):
        if dim_t is not None and dim_o is not None and dim_t != dim_o:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape. "
                "Received: "
                f"target.shape={target.shape}, output.shape={output.shape}"
            )

    # Recover logits when `output` was produced by a sigmoid activation.
    output, from_logits = _get_logits(
        output, from_logits, "Sigmoid", "binary_crossentropy"
    )

    if from_logits:
        return tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target, logits=output
        )

    # Compute cross entropy from probabilities, clipped away from 0 and 1
    # so the logarithms stay finite.
    output = tf.clip_by_value(
        output, backend.epsilon(), 1.0 - backend.epsilon()
    )
    positive_term = target * tf.math.log(output)
    negative_term = (1 - target) * tf.math.log(1 - output)
    return -(positive_term + negative_term)
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
def moments(x, axes, keepdims=False, synchronized=False):
    """Return the mean and variance of `x` along `axes`.

    With `synchronized=True` the statistics are aggregated across
    `tf.distribute` replicas.
    """
    # The dynamic range of float16 is too limited for statistics. As a
    # workaround, we simply perform the operations on float32 and convert
    # back to the original half-precision dtype afterwards.
    input_dtype = backend.standardize_dtype(x.dtype)
    low_precision = input_dtype in ("float16", "bfloat16")
    if low_precision:
        x = cast(x, "float32")

    compute_fn = _compute_moments_sync if synchronized else _compute_moments
    mean, variance = compute_fn(x, axes, keepdims)

    if low_precision:
        # avoid overflow and underflow when casting from float32 back down
        mean = tf.clip_by_value(mean, tf.float16.min, tf.float16.max)
        variance = tf.clip_by_value(variance, tf.float16.min, tf.float16.max)
        mean = cast(mean, input_dtype)
        variance = cast(variance, input_dtype)
    return mean, variance
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def _compute_moments_sync(x, axes, keepdims):
    """Compute mean/variance of `x` aggregated across all replicas.

    Falls back to the local computation when not running inside a
    `tf.distribute` replica context.
    """
    replica_ctx = tf.distribute.get_replica_context()
    if not replica_ctx:
        return _compute_moments(x, axes, keepdims)

    # Per-element count so the global mean can be formed after summation.
    local_count = tf.ones_like(x, name="count")

    local_sum = tf.reduce_sum(x, axis=axes, keepdims=True)
    local_squared_sum = tf.reduce_sum(tf.square(x), axis=axes, keepdims=True)
    local_count = tf.reduce_sum(local_count, axis=axes, keepdims=True)

    # TODO(b/163099951): batch the all-reduces once we sort out the
    # ordering issue for NCCL. We don't have a mechanism to launch
    # NCCL in the same order in each replica nowadays, so we limit
    # NCCL to batch all-reduces.
    y_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_sum)
    y_squared_sum = replica_ctx.all_reduce(
        tf.distribute.ReduceOp.SUM, local_squared_sum
    )
    count_sum = replica_ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_count)

    # divide_no_nan guards against an all-masked (zero-count) reduction.
    mean = tf.math.divide_no_nan(y_sum, count_sum)
    y_squared_mean = tf.math.divide_no_nan(y_squared_sum, count_sum)
    # var = E(x^2) - E(x)^2
    variance = tf.maximum(y_squared_mean - tf.square(mean), 0.0)
    if not keepdims:
        mean = tf.squeeze(mean, axes)
        variance = tf.squeeze(variance, axes)

    return mean, variance
|
| 851 |
+
|
| 852 |
+
|
| 853 |
+
def _compute_moments(x, axes, keepdims):
    """Local (single-replica) mean/variance via `tf.nn.moments`."""
    mean, variance = tf.nn.moments(x, axes, keepdims=keepdims)
    return mean, variance
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Apply batch normalization to `x` with precomputed statistics."""
    if axis != -1:
        # `tf.nn.batch_normalization` broadcasts along the trailing axis;
        # reshape the per-channel statistics to align with `axis` instead.
        broadcast_shape = [1] * len(x.shape)
        broadcast_shape[axis] = mean.shape[0]
        mean = tf.reshape(mean, broadcast_shape)
        variance = tf.reshape(variance, broadcast_shape)
        offset = (
            None if offset is None else tf.reshape(offset, broadcast_shape)
        )
        scale = None if scale is None else tf.reshape(scale, broadcast_shape)

    return tf.nn.batch_normalization(
        x=x,
        mean=mean,
        variance=variance,
        offset=offset,
        scale=scale,
        variance_epsilon=epsilon,
    )
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
def ctc_loss(
    target,
    output,
    target_length,
    output_length,
    mask_index=0,
):
    """Connectionist Temporal Classification (CTC) loss.

    Args:
        target: Integer label tensor; cast to int32 internally.
        output: Batch-major logits (`logits_time_major=False` below).
        target_length: Length of each label sequence.
        output_length: Length of each logit sequence.
        mask_index: Index of the blank label. Defaults to 0.

    Returns:
        Per-sample CTC loss, cast to the dtype promoted from `output`.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    target = tf.cast(target, dtype="int32")

    # `tf.nn.ctc_loss` will internally cast to float32 when the input is float16
    # or bfloat16. Additionally, it will raise an error when the input is
    # float64. As a result, we perform the casting externally and add support
    # for float64.
    result_dtype = backend.result_type(output.dtype, "float32")
    compute_dtype = "float32" if result_dtype == "float64" else result_dtype
    output = tf.cast(output, compute_dtype)
    loss = tf.nn.ctc_loss(
        labels=target,
        logits=output,
        label_length=target_length,
        logit_length=output_length,
        blank_index=mask_index,
        logits_time_major=False,
    )
    return tf.cast(loss, result_dtype)
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Decode CTC output logits into label sequences.

    Args:
        inputs: Logits with shape (batch, time, num_classes).
        sequence_lengths: Per-sample sequence lengths; cast to int32.
        strategy: `"greedy"` or `"beam_search"`.
        beam_width: Beam size (beam search only).
        top_paths: Number of decoded paths to return (beam search only).
        merge_repeated: Collapse repeated labels (greedy only).
        mask_index: Index of the blank label.

    Returns:
        Tuple `(decoded_dense, scores)` where `decoded_dense` has shape
        (paths, batch, time), padded with -1.
    """
    inputs = convert_to_tensor(inputs)
    input_shape = tf.shape(inputs)
    num_samples, num_steps = input_shape[0], input_shape[1]
    # The TF decoders expect time-major input: (time, batch, classes).
    inputs = tf.transpose(inputs, (1, 0, 2))

    dtype = backend.result_type(inputs.dtype, "float32")
    inputs = tf.cast(inputs, dtype)

    sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32")
    if strategy == "greedy":
        (decoded, scores) = tf.nn.ctc_greedy_decoder(
            inputs=inputs,
            sequence_length=sequence_lengths,
            merge_repeated=merge_repeated,
            blank_index=mask_index,
        )
    elif strategy == "beam_search":
        # Move `mask_index` column to the last position since this is the
        # default for `tf.nn.ctc_beam_search_decoder`
        if mask_index is not None:
            inputs_before = inputs[..., :mask_index]
            inputs_mask = inputs[..., mask_index : mask_index + 1]
            inputs_after = inputs[..., mask_index + 1 :]
            inputs = tf.concat(
                [inputs_before, inputs_after, inputs_mask], axis=-1
            )
        (decoded, scores) = tf.nn.ctc_beam_search_decoder(
            inputs=inputs,
            sequence_length=sequence_lengths,
            beam_width=beam_width,
            top_paths=top_paths,
        )
    else:
        raise ValueError(
            f"Invalid strategy {strategy}. Supported values are "
            "'greedy' and 'beam_search'."
        )

    # Postprocess sparse tensor
    decoded_dense = []
    for st in decoded:
        st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))
        decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))
    decoded_dense = tf.stack(decoded_dense, axis=0)
    decoded_dense = tf.cast(decoded_dense, "int32")

    # We need to recover the labels because we swapped the indices earlier
    if strategy == "beam_search" and mask_index is not None:
        if mask_index < 0:
            mask_index = mask_index + input_shape[-1]
        decoded_dense = tf.where(
            decoded_dense >= mask_index, decoded_dense + 1, decoded_dense
        )
    return decoded_dense, scores
|
| 972 |
+
|
| 973 |
+
|
| 974 |
+
def psnr(x1, x2, max_val):
    """Peak signal-to-noise ratio between two tensors of identical shape."""
    from keras.src.backend.tensorflow.numpy import log10

    if x1.shape != x2.shape:
        raise ValueError(
            f"Input shapes {x1.shape} and {x2.shape} must "
            "match for PSNR calculation. "
        )

    max_val = convert_to_tensor(max_val, dtype=x2.dtype)
    # PSNR = 20 * log10(max_val) - 10 * log10(MSE)
    mean_squared_error = tf.reduce_mean(tf.square(x1 - x2))
    return 20 * log10(max_val) - 10 * log10(mean_squared_error)
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
def _get_large_negative(dtype):
    """A large negative constant (-0.7 * near-max of `dtype`) for masking."""
    dtype = backend.standardize_dtype(dtype)
    if dtype == "float16":
        near_max = 65500.0
    else:
        near_max = 3.38953e38
    return tf.constant(near_max * -0.7, dtype=dtype)
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
def _apply_masks(logits, mask, is_causal):
    """Mask attention logits, filling masked positions with a large
    negative value so they vanish under softmax."""
    if mask is None and not is_causal:
        return logits

    keep = tf.ones_like(logits, dtype="bool")
    if mask is not None:
        keep = tf.logical_and(keep, mask)

    if is_causal:
        # Lower-triangular (target, source) mask: position t only attends
        # to source positions <= t.
        shape = tf.shape(logits)
        num_targets, num_sources = shape[2], shape[3]
        causal = tf.linalg.band_part(
            tf.ones((num_targets, num_sources), "bool"), -1, 0
        )
        keep = tf.logical_and(keep, causal[None, None, :, :])

    return tf.where(keep, logits, _get_large_negative(logits.dtype))
|
| 1014 |
+
|
| 1015 |
+
|
| 1016 |
+
def _dot_product_attention_xla(query, key, value, bias, mask, is_causal, scale):
    """Reference (XLA-friendly) scaled dot-product attention.

    Einsum subscripts: B=batch, T=target seq, S=source seq, N=heads,
    H=head dim.
    """
    # Logits are accumulated in at least float32 for numerical stability.
    logits_dtype = backend.result_type(query.dtype, "float32")
    logits = tf.einsum("BTNH,BSNH->BNTS", query, key, optimize="optimal")
    logits = tf.cast(logits, logits_dtype)
    logits = tf.multiply(logits, tf.cast(scale, logits.dtype))

    if bias is not None:
        logits = tf.add(logits, tf.cast(bias, logits.dtype))

    padded_logits = _apply_masks(logits, mask, is_causal)

    # Softmax is always carried out in high precision.
    probs_dtype = backend.result_type(padded_logits.dtype, "float32")
    probs = tf.cast(
        tf.nn.softmax(tf.cast(padded_logits, probs_dtype), axis=-1), key.dtype
    )
    return tf.einsum("BNTS,BSNH->BTNH", probs, value, optimize="optimal")
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
def dot_product_attention(
    query,
    key,
    value,
    bias=None,
    mask=None,
    scale=None,
    is_causal=False,
    flash_attention=None,
):
    """Scaled dot-product attention (XLA path only on this backend).

    Mirrors `jax.nn.dot_product_attention`:
    https://github.com/jax-ml/jax/blob/jax-v0.4.32/jax/_src/nn/functions.py#L828
    The `query_seq_lengths` and `key_value_seq_lengths` args are not
    supported.
    """
    # `None` means "auto", which resolves to disabled on this backend; any
    # truthy request is an explicit, unsupported opt-in.
    if flash_attention:
        raise ValueError(
            "Flash attention is not supported in tensorflow backend."
        )

    query = convert_to_tensor(query)
    key = convert_to_tensor(key)
    value = convert_to_tensor(value)
    if len(query.shape) != 4:
        raise ValueError(
            "`dot_product_attention` only supports 4D inputs. "
            f"Received: query.shape={query.shape}, key.shape={key.shape}, "
            f"value.shape={value.shape}."
        )

    if scale is None:
        # Default to 1/sqrt(head_dim), the standard attention scaling.
        head_dim = tf.shape(key)[-1]
        scale = 1.0 / tf.sqrt(tf.cast(head_dim, "float32"))
    return _dot_product_attention_xla(
        query, key, value, bias, mask, is_causal, scale
    )
|
infer_4_33_0/lib/python3.10/site-packages/keras/src/backend/tensorflow/numpy.py
ADDED
|
@@ -0,0 +1,2658 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import collections
|
| 3 |
+
import functools
|
| 4 |
+
import math
|
| 5 |
+
import string
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import tensorflow as tf
|
| 10 |
+
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
|
| 11 |
+
from tensorflow.python.ops.math_ops import is_nan
|
| 12 |
+
|
| 13 |
+
from keras.src import tree
|
| 14 |
+
from keras.src.backend import config
|
| 15 |
+
from keras.src.backend import standardize_dtype
|
| 16 |
+
from keras.src.backend.common import dtypes
|
| 17 |
+
from keras.src.backend.common.backend_utils import canonicalize_axis
|
| 18 |
+
from keras.src.backend.common.backend_utils import to_tuple_or_list
|
| 19 |
+
from keras.src.backend.common.backend_utils import vectorize_impl
|
| 20 |
+
from keras.src.backend.tensorflow import sparse
|
| 21 |
+
from keras.src.backend.tensorflow.core import cast
|
| 22 |
+
from keras.src.backend.tensorflow.core import convert_to_tensor
|
| 23 |
+
from keras.src.backend.tensorflow.core import shape as shape_op
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@sparse.elementwise_binary_union(tf.sparse.add)
def add(x1, x2):
    """Element-wise addition with numpy-style dtype promotion.

    When `x2` looks like a bias vector for `x1`, dispatches to
    `tf.nn.bias_add`, which XLA/TF can fuse with `MatMul`/`Conv*` kernels.
    """
    # Keep Python scalars as-is so `type(x)` participates in promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)

    # Fast path: `tf.nn.bias_add` (`x1` as inputs, `x2` as bias — no swap).
    bias_dims = [d for d in x2.shape.as_list() if d is None or d > 1]
    x1_dims = x1.shape.as_list()
    looks_like_bias_add = (
        # `x2` squeezes down to a vector...
        len(bias_dims) == 1
        # ...`x1` has rank >= 2...
        and len(x1.shape) > 1
        # ...the bias length is statically known...
        and bias_dims[0] is not None
        # ...and it matches `x1`'s channel dimension (first or last).
        and bias_dims[0] in {x1_dims[1], x1_dims[-1]}
    )
    if looks_like_bias_add:
        fmt = "NHWC" if x1.shape[-1] == bias_dims[0] else "NCHW"
        if len(x2.shape) > 1:
            x2 = tf.squeeze(x2)
        return tf.nn.bias_add(x1, x2, data_format=fmt)

    return tf.add(x1, x2)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def bincount(x, weights=None, minlength=0, sparse=False):
    """Count occurrences of each value in `x`, like `np.bincount`.

    If `sparse=True` or `x` is a `tf.SparseTensor`, returns a
    `tf.SparseTensor`; otherwise a dense tensor.
    """
    x = convert_to_tensor(x)
    input_dtypes = [x.dtype]
    if standardize_dtype(x.dtype) not in ("int32", "int64"):
        x = tf.cast(x, tf.int32)

    if weights is None:
        dtype = "int32"
    else:
        weights = convert_to_tensor(weights)
        input_dtypes.append(weights.dtype)
        dtype = dtypes.result_type(*input_dtypes)
        weights_dtype = standardize_dtype(weights.dtype)
        if weights_dtype not in ("int32", "int64", "float32", "float64"):
            # The bincount kernels only accept these four weight dtypes,
            # so promote anything else to int32 or float32.
            target = tf.int32 if "int" in weights_dtype else tf.float32
            weights = tf.cast(weights, target)

    if sparse or isinstance(x, tf.SparseTensor):
        counts = tf.sparse.bincount(
            x,
            weights=weights,
            minlength=minlength,
            axis=-1,
        )
        length = counts.shape[-1]
        if length is None:
            # Fall back to the dynamic shape when not statically known.
            length = tf.shape(counts)[-1]
        counts = cast(counts, dtype)
        if x.shape.rank == 1:
            dense_shape = (length,)
        else:
            batch = counts.shape[0]
            if batch is None:
                batch = tf.shape(counts)[0]
            dense_shape = (batch, length)
        return tf.SparseTensor(
            indices=counts.indices,
            values=counts.values,
            dense_shape=dense_shape,
        )

    dense_counts = tf.math.bincount(
        x, weights=weights, minlength=minlength, axis=-1
    )
    return tf.cast(dense_counts, dtype)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@functools.lru_cache(512)
|
| 116 |
+
def _normalize_einsum_subscripts(subscripts):
|
| 117 |
+
# string.ascii_letters
|
| 118 |
+
mapping = {}
|
| 119 |
+
normalized_subscripts = ""
|
| 120 |
+
for c in subscripts:
|
| 121 |
+
if c in string.ascii_letters:
|
| 122 |
+
if c not in mapping:
|
| 123 |
+
mapping[c] = string.ascii_letters[len(mapping)]
|
| 124 |
+
normalized_subscripts += mapping[c]
|
| 125 |
+
else:
|
| 126 |
+
normalized_subscripts += c
|
| 127 |
+
return normalized_subscripts
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def einsum(subscripts, *operands, **kwargs):
    """Einstein summation over `operands` per `subscripts`.

    For a fixed set of normalized subscripts, dispatches to hand-written
    `tf.matmul`-based implementations (`use_custom_ops`) so that int8
    operands can use hardware-accelerated int8xint8->int32 matmul;
    otherwise falls back to `tf.einsum`.
    """
    operands = tree.map_structure(convert_to_tensor, operands)
    # Rename subscript letters by first appearance so the tables below
    # match regardless of the letters the caller used.
    subscripts = _normalize_einsum_subscripts(subscripts)

    def is_valid_for_custom_ops(subscripts, *operands):
        # Check that `subscripts` is supported and the shape of operands is not
        # `None`.
        if subscripts in [
            "a,b->ab",
            "ab,b->a",
            "ab,bc->ac",
            "ab,cb->ac",
            "abc,cd->abd",
            "abc,dc->abd",
            "abcd,abde->abce",
            "abcd,abed->abce",
            "abcd,acbe->adbe",
            "abcd,adbe->acbe",
            "abcd,aecd->acbe",
            "abcd,aecd->aceb",
        ]:
            # These subscripts don't require the shape information
            return True
        elif subscripts == "abc,cde->abde":
            _, b1, c1 = operands[0].shape
            c2, d2, e2 = operands[1].shape
            # A dim may be statically unknown on one operand but known on
            # the other (shared axis `c`); `or` picks the known one.
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abc,dce->abde":
            _, b1, c1 = operands[0].shape
            d2, c2, e2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abc,dec->abde":
            _, b1, c1 = operands[0].shape
            d2, e2, c2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,cde->abe":
            _, b1, c1, d1 = operands[0].shape
            c2, d2, e2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,ced->abe":
            _, b1, c1, d1 = operands[0].shape
            c2, e2, d2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,ecd->abe":
            _, b1, c1, d1 = operands[0].shape
            e2, c2, d2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcde,aebf->adbcf":
            _, b1, c1, d1, e1 = operands[0].shape
            _, e2, b2, f2 = operands[1].shape
            b, c, d, e, f = b1 or b2, c1, d1, e1 or e2, f2
            if None in (b, c, d, e, f):
                return False
            return True
        elif subscripts == "abcde,afce->acdbf":
            _, b1, c1, d1, e1 = operands[0].shape
            _, f2, c2, e2 = operands[1].shape
            b, c, d, e, f = b1, c1 or c2, d1, e1 or e2, f2
            if None in (b, c, d, e, f):
                return False
            return True
        else:
            # No match in subscripts
            return False

    def use_custom_ops(subscripts, *operands, output_type):
        # Replace tf.einsum with custom ops to utilize hardware-accelerated
        # matmul
        x, y = operands[0], operands[1]
        if subscripts == "a,b->ab":
            # Outer product as a column-vector x row-vector matmul.
            x = tf.expand_dims(x, axis=-1)
            y = tf.expand_dims(y, axis=0)
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "ab,b->a":
            y = tf.expand_dims(y, axis=-1)
            result = tf.matmul(x, y, output_type=output_type)
            return tf.squeeze(result, axis=-1)
        elif subscripts == "ab,bc->ac":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "ab,cb->ac":
            y = tf.transpose(y, [1, 0])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,cd->abd":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,cde->abde":
            _, b1, c1 = x.shape
            c2, d2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            # Flatten `de` so the contraction is a plain matmul over `c`.
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abc,dc->abd":
            y = tf.transpose(y, [1, 0])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,dce->abde":
            _, b1, c1 = x.shape
            d2, c2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            y = tf.transpose(y, [1, 0, 2])  # cde
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abc,dec->abde":
            _, b1, c1 = x.shape
            d2, e2, c2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            y = tf.transpose(y, [2, 0, 1])  # cde
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abcd,abde->abce":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,abed->abce":
            y = tf.transpose(y, [0, 1, 3, 2])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,acbe->adbe":
            x = tf.transpose(x, [0, 1, 3, 2])
            y = tf.transpose(y, [0, 2, 1, 3])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.transpose(result, [0, 2, 1, 3])
        elif subscripts == "abcd,adbe->acbe":
            y = tf.transpose(y, [0, 2, 1, 3])  # abde
            result = tf.matmul(x, y, output_type=output_type)  # abce
            return tf.transpose(result, [0, 2, 1, 3])
        elif subscripts == "abcd,aecd->acbe":
            x = tf.transpose(x, [0, 2, 1, 3])  # acbd
            y = tf.transpose(y, [0, 2, 3, 1])  # acde
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,aecd->aceb":
            x = tf.transpose(x, [0, 2, 1, 3])
            y = tf.transpose(y, [0, 2, 3, 1])
            result = tf.matmul(x, y, output_type=output_type)  # acbe
            return tf.transpose(result, [0, 1, 3, 2])
        elif subscripts == "abcd,cde->abe":
            _, b1, c1, d1 = x.shape
            c2, d2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            # Fold the contracted `cd` pair into one axis on both sides.
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,ced->abe":
            _, b1, c1, d1 = x.shape
            c2, e2, d2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.transpose(y, [0, 2, 1])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,ecd->abe":
            _, b1, c1, d1 = x.shape
            e2, c2, d2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.transpose(y, [1, 2, 0])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcde,aebf->adbcf":
            _, b1, c1, d1, e1 = x.shape
            _, e2, b2, f2 = y.shape
            b, c, d, e, f = b1 or b2, c1, d1, e1 or e2, f2
            x = tf.reshape(x, [-1, b, c * d, e])  # ab(cd)e
            y = tf.transpose(y, [0, 2, 1, 3])  # abef
            result = tf.matmul(x, y, output_type=output_type)  # ab(cd)f
            result = tf.reshape(result, [-1, b, c, d, f])  # abcdf
            return tf.transpose(result, [0, 3, 1, 2, 4])
        elif subscripts == "abcde,afce->acdbf":
            _, b1, c1, d1, e1 = x.shape
            _, f2, c2, e2 = y.shape
            b, c, d, e, f = b1, c1 or c2, d1, e1 or e2, f2
            x = tf.transpose(x, [0, 2, 3, 1, 4])  # acdbe
            x = tf.reshape(x, [-1, c, d * b, e])  # ac(db)e
            y = tf.transpose(y, [0, 2, 3, 1])  # acef
            result = tf.matmul(x, y, output_type=output_type)  # ac(db)f
            return tf.reshape(result, [-1, c, d, b, f])
        else:
            raise NotImplementedError

    dtypes_to_resolve = list(set(standardize_dtype(x.dtype) for x in operands))
    # When operands are of int8, we cast the result to int32 to align with
    # the behavior of jax.
    if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == "int8":
        compute_dtype = "int8"
        result_dtype = "int32"
        output_type = "int32"
    else:
        result_dtype = dtypes.result_type(*dtypes_to_resolve)
        compute_dtype = result_dtype
        output_type = None

    # TODO: Remove the condition once `tf.einsum` supports int8xint8->int32
    if is_valid_for_custom_ops(subscripts, *operands) and not kwargs:
        # TODO: tf.matmul doesn't support integer dtype if not specifying
        # output_type="int32"
        if "int" in compute_dtype and output_type is None:
            compute_dtype = config.floatx()
            operands = tree.map_structure(
                lambda x: tf.cast(x, compute_dtype), operands
            )
        result = use_custom_ops(subscripts, *operands, output_type=output_type)
    else:
        # TODO: tf.einsum doesn't support integer dtype with gpu
        if "int" in compute_dtype:
            compute_dtype = config.floatx()
            operands = tree.map_structure(
                lambda x: tf.cast(x, compute_dtype), operands
            )
        result = tf.einsum(subscripts, *operands, **kwargs)
    return tf.cast(result, result_dtype)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@sparse.elementwise_binary_union(sparse.sparse_subtract)
def subtract(x1, x2):
    """Element-wise `x1 - x2` with numpy-style dtype promotion."""
    # Python scalars stay unwrapped so `type(x)` drives promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.subtract(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def matmul(x1, x2):
    """Matrix product of `x1` and `x2`, supporting sparse operands.

    Dispatches over sparsity and rank: dense x dense uses `tf.matmul` /
    `tf.tensordot`; sparse combinations use CSR matmul, embedding lookup,
    or a `tf.map_fn` over 3D batches, with higher ranks flattened into a
    single batch dimension first.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    x1_shape = x1.shape
    x2_shape = x2.shape
    x1_sparse = isinstance(x1, tf.SparseTensor)
    x2_sparse = isinstance(x2, tf.SparseTensor)
    # When both x1 and x2 are of int8 and dense tensor, specifying `output_type`
    # as int32 to enable hardware-accelerated matmul
    x1_dtype = standardize_dtype(x1.dtype)
    x2_dtype = standardize_dtype(x2.dtype)
    if (
        x1_dtype == "int8"
        and x2_dtype == "int8"
        and not x1_sparse
        and not x2_sparse
        and x1_shape.rank != 1  # TODO: support tf.tensordot
        and x2_shape.rank != 1  # TODO: support tf.tensordot
    ):
        compute_dtype = "int8"
        result_dtype = "int32"
        output_type = result_dtype
    else:
        # TODO: Typically, GPU and XLA only support float types
        compute_dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
        result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
        output_type = None
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)

    def with_combined_batch_dimensions(a, b, output_shape, fn_3d):
        # Flatten all leading batch dims into one, apply the 3D kernel,
        # then restore `output_shape`.
        a_sparse = isinstance(a, tf.SparseTensor)
        b_sparse = isinstance(b, tf.SparseTensor)
        batch_shape = b.shape[:-2] if b_sparse else a.shape[:-2]
        batch_size = math.prod(batch_shape)
        a3d_shape = [batch_size] + a.shape[-2:]
        a_3d = (
            tf.sparse.reshape(a, a3d_shape)
            if a_sparse
            else tf.reshape(a, a3d_shape)
        )
        b3d_shape = [batch_size] + b.shape[-2:]
        b_3d = (
            tf.sparse.reshape(b, b3d_shape)
            if b_sparse
            else tf.reshape(b, b3d_shape)
        )
        result_3d = fn_3d(a_3d, b_3d)
        return (
            tf.sparse.reshape(result_3d, output_shape)
            if isinstance(result_3d, tf.SparseTensor)
            else tf.reshape(result_3d, output_shape)
        )

    def sparse_sparse_matmul(a, b):
        # Sparse x sparse via the CSR SparseMatrix kernels.
        dtype = a.values.dtype
        # Convert SparseTensors to CSR SparseMatrix.
        a_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            a.indices, a.values, a.dense_shape
        )
        b_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            b.indices, b.values, b.dense_shape
        )
        # Compute the CSR SparseMatrix matrix multiplication.
        result_csr = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
            a_csr, b_csr, dtype
        )
        # Convert the CSR SparseMatrix to a SparseTensor.
        res = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
            result_csr, dtype
        )
        return tf.SparseTensor(res.indices, res.values, res.dense_shape)

    def embedding_lookup_sparse_dense_matmul(a, b):
        # We need at least one id per rows for embedding_lookup_sparse,
        # otherwise there will be missing rows in the output.
        a, _ = tf.sparse.fill_empty_rows(a, 0)
        # We need to split x1 into separate ids and weights tensors. The ids
        # should be the column indices of x1 and the values of the weights
        # can continue to be the actual x1. The column arrangement of ids
        # and weights does not matter as we sum over columns. See details in
        # the documentation for sparse_ops.sparse_tensor_dense_matmul.
        ids = tf.SparseTensor(
            indices=a.indices,
            values=a.indices[:, 1],
            dense_shape=a.dense_shape,
        )
        return tf.nn.embedding_lookup_sparse(b, ids, a, combiner="sum")

    # Either a or b is sparse
    def sparse_dense_matmul_3d(a, b):
        return tf.map_fn(
            lambda x: tf.sparse.sparse_dense_matmul(x[0], x[1]),
            elems=(a, b),
            fn_output_signature=a.dtype,
        )

    if x1_sparse or x2_sparse:
        # Deferred import to avoid a circular dependency at module load.
        from keras.src.ops.operation_utils import compute_matmul_output_shape

        output_shape = compute_matmul_output_shape(x1_shape, x2_shape)
        if x1_sparse and x2_sparse:
            if x1_shape.rank <= 3:
                output = sparse_sparse_matmul(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_sparse_matmul
                )
        else:
            # Sparse * dense or dense * sparse
            sparse_rank = x1_shape.rank if x1_sparse else x2_shape.rank

            # Special case: embedding_lookup_sparse for sparse * dense, rank 2
            if x1_sparse and sparse_rank == 2:
                output = embedding_lookup_sparse_dense_matmul(x1, x2)
            elif sparse_rank == 2:
                output = tf.sparse.sparse_dense_matmul(x1, x2)
            elif sparse_rank == 3:
                output = sparse_dense_matmul_3d(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_dense_matmul_3d
                )
        output = tf.cast(output, result_dtype)
        output.set_shape(output_shape)
        return output
    else:
        if x1_shape.rank == 2 and x2_shape.rank == 2:
            output = tf.matmul(x1, x2, output_type=output_type)
        elif x2_shape.rank == 1:
            # Vector on the right: contract x1's last axis with x2.
            output = tf.tensordot(x1, x2, axes=1)
        elif x1_shape.rank == 1:
            # Vector on the left: contract with x2's second-to-last axis.
            output = tf.tensordot(x1, x2, axes=[[0], [-2]])
        else:
            output = tf.matmul(x1, x2, output_type=output_type)
        return tf.cast(output, result_dtype)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
@sparse.elementwise_binary_intersection
def multiply(x1, x2):
    """Element-wise product with numpy-style dtype promotion."""
    # Python scalars stay unwrapped so `type(x)` drives promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.multiply(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
def mean(x, axis=None, keepdims=False):
    """Arithmetic mean along `axis`, like `np.mean`.

    Has a dedicated path for `tf.IndexedSlices` (gradients of gathers)
    that keeps the result sparse where possible; each branch below
    depends on whether axis 0 — the slices axis — is reduced.
    """
    if isinstance(x, tf.IndexedSlices):
        # NOTE: the local name `sum` shadows the builtin inside this
        # branch only; kept as-is to preserve the original code.
        if axis is None:
            # Reduce against all axes, result is a single value and dense.
            # The denominator has to account for `dense_shape`.
            sum = tf.reduce_sum(x.values, keepdims=keepdims)
            return sum / tf.cast(tf.reduce_prod(x.dense_shape), dtype=sum.dtype)

        axis = to_tuple_or_list(axis)
        if not axis:
            # Empty axis tuple, this is a no-op
            return x

        dense_shape = tf.convert_to_tensor(x.dense_shape)
        rank = tf.shape(dense_shape)[0]
        # Normalize axis: convert negative values and sort
        axis = [canonicalize_axis(a, rank) for a in axis]
        axis.sort()

        if axis == [0]:
            # Reduce against `axis=0` only, result is dense.
            # The denominator has to account for `dense_shape[0]`.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=keepdims)
            return sum / tf.cast(dense_shape[0], dtype=sum.dtype)
        elif axis[0] == 0:
            # Reduce against axis 0 and other axes, result is dense.
            # We do `axis=0` separately first. The denominator has to account
            # for `dense_shape[0]`.
            # We use `keepdims=True` in `reduce_sum`` so that we can leave the
            # 0 in axis and do `reduce_mean` with `keepdims` to apply it for all
            # axes.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=True)
            axis_0_mean = sum / tf.cast(dense_shape[0], dtype=sum.dtype)
            return tf.reduce_mean(axis_0_mean, axis=axis, keepdims=keepdims)
        elif keepdims:
            # With `keepdims=True`, result is an `IndexedSlices` with the same
            # indices since axis 0 is not touched. The only thing to do is to
            # correct `dense_shape` to account for dimensions that became 1.
            new_values = tf.reduce_mean(x.values, axis=axis, keepdims=True)
            new_dense_shape = tf.concat(
                [dense_shape[0:1], new_values.shape[1:]], axis=0
            )
            return tf.IndexedSlices(new_values, x.indices, new_dense_shape)
        elif rank == len(axis) + 1:
            # `keepdims=False` and reducing against all axes except 0, result is
            # a 1D tensor, which cannot be `IndexedSlices`. We have to scatter
            # the computed means to construct the correct dense tensor.
            return tf.scatter_nd(
                tf.expand_dims(x.indices, axis=1),
                tf.reduce_mean(x.values, axis=axis),
                [dense_shape[0]],
            )
        else:
            # `keepdims=False`, not reducing against axis 0 and there is at
            # least one other axis we are not reducing against. We simply need
            # to fix `dense_shape` to remove dimensions that were reduced.
            gather_indices = [i for i in range(rank) if i not in axis]
            return tf.IndexedSlices(
                tf.reduce_mean(x.values, axis=axis),
                x.indices,
                tf.gather(x.dense_shape, gather_indices, axis=0),
            )
    # Dense path.
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    # `tf.reduce_mean` does not handle low precision (e.g., float16) overflow
    # correctly, so we compute with float32 and cast back to the original type.
    if "int" in ori_dtype or ori_dtype == "bool":
        result_dtype = compute_dtype
    else:
        result_dtype = ori_dtype
    output = tf.reduce_mean(
        tf.cast(x, compute_dtype), axis=axis, keepdims=keepdims
    )
    return tf.cast(output, result_dtype)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
def max(x, axis=None, keepdims=False, initial=None):
    """Return the maximum of `x` along `axis` (numpy `max` semantics)."""
    x = convert_to_tensor(x)

    # TF's numpy API has no `initial` argument, so emulate it here.
    if initial is not None:
        if standardize_dtype(x.dtype) == "bool":
            # Boolean max is a logical OR; fold in `initial` via int space,
            # then restore the bool dtype.
            reduced = tf.reduce_any(x, axis=axis, keepdims=keepdims)
            clamped = tf.math.maximum(
                tf.cast(reduced, "int32"), tf.cast(initial, "int32")
            )
            return tf.cast(clamped, "bool")
        reduced = tf.reduce_max(x, axis=axis, keepdims=keepdims)
        return tf.math.maximum(reduced, initial)

    # TF returns -inf for an empty input; numpy (and the other backends)
    # raise instead, so assert non-emptiness when we can check eagerly.
    if tf.executing_eagerly():
        size_x = size(x)
        tf.assert_greater(
            size_x,
            tf.constant(0, dtype=size_x.dtype),
            message="Cannot compute the max of an empty tensor.",
        )

    if standardize_dtype(x.dtype) == "bool":
        return tf.reduce_any(x, axis=axis, keepdims=keepdims)
    return tf.reduce_max(x, axis=axis, keepdims=keepdims)
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
def ones(shape, dtype=None):
    """Tensor of ones; `dtype` defaults to the Keras floatx."""
    return tf.ones(shape, dtype=dtype or config.floatx())
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
def zeros(shape, dtype=None):
    """Tensor of zeros; `dtype` defaults to the Keras floatx."""
    return tf.zeros(shape, dtype=dtype or config.floatx())
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@sparse.elementwise_unary
def absolute(x):
    """Element-wise absolute value."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # Unsigned ints and bools are already non-negative; skip the op.
    if "uint" in dtype or dtype == "bool":
        return x
    return tf.abs(x)
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def abs(x):
    """Alias of `absolute` (numpy-compatible name)."""
    return absolute(x)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def all(x, axis=None, keepdims=False):
    """Logical AND reduction along `axis`."""
    return tf.reduce_all(tf.cast(x, "bool"), axis=axis, keepdims=keepdims)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def any(x, axis=None, keepdims=False):
    """Logical OR reduction along `axis`."""
    return tf.reduce_any(tf.cast(x, "bool"), axis=axis, keepdims=keepdims)
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
def amax(x, axis=None, keepdims=False):
    """Alias of `max` (numpy-compatible name)."""
    return max(x, axis=axis, keepdims=keepdims)
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def amin(x, axis=None, keepdims=False):
    """Alias of `min` (numpy-compatible name)."""
    return min(x, axis=axis, keepdims=keepdims)
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
def append(x1, x2, axis=None):
    """Concatenate `x2` onto `x1`; both are flattened when `axis` is None."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, common)
    x2 = tf.cast(x2, common)
    if axis is None:
        flat = [tf.reshape(x1, [-1]), tf.reshape(x2, [-1])]
        return tf.concat(flat, axis=0)
    return tf.concat([x1, x2], axis=axis)
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
def arange(start, stop=None, step=1, dtype=None):
    """Evenly spaced values in [start, stop) with spacing `step`."""
    if dtype is None:
        candidates = [
            getattr(start, "dtype", type(start)),
            getattr(step, "dtype", type(step)),
        ]
        if stop is not None:
            candidates.append(getattr(stop, "dtype", type(stop)))
        dtype = dtypes.result_type(*candidates)
    dtype = standardize_dtype(dtype)
    try:
        return tf.range(start, stop, delta=step, dtype=dtype)
    except tf.errors.NotFoundError:
        # Some dtypes have no registered kernel in eager mode on CPU/GPU;
        # compute in float32 and cast back.
        out = tf.range(start, stop, delta=step, dtype="float32")
        return tf.cast(out, dtype)
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
@sparse.densifying_unary(0.5 * np.pi)
def arccos(x):
    """Element-wise inverse cosine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.acos(tf.cast(x, target))
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
@sparse.densifying_unary(np.nan)
def arccosh(x):
    """Element-wise inverse hyperbolic cosine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.acosh(tf.cast(x, target))
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
@sparse.elementwise_unary
def arcsin(x):
    """Element-wise inverse sine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.asin(tf.cast(x, target))
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
@sparse.elementwise_unary
def arcsinh(x):
    """Element-wise inverse hyperbolic sine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.asinh(tf.cast(x, target))
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
@sparse.elementwise_unary
def arctan(x):
    """Element-wise inverse tangent."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.atan(tf.cast(x, target))
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
def arctan2(x1, x2):
    """Element-wise arc tangent of `x1 / x2` choosing the quadrant."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype, float)
    return tf.math.atan2(tf.cast(x1, common), tf.cast(x2, common))
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
@sparse.elementwise_unary
def arctanh(x):
    """Element-wise inverse hyperbolic tangent."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.atanh(tf.cast(x, target))
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
def _keepdims(x, y, axis):
    """Reshape reduction result `y` so reduced axes of `x` are kept as 1."""
    if axis is None:
        # Everything was reduced: all-ones shape with x's rank.
        shape = [1] * len(x.shape)
    else:
        shape = list(shape_op(x))
        for a in tree.flatten(axis):
            shape[a] = 1
    return tf.reshape(y, shape)
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
def argmax(x, axis=None, keepdims=False):
    """Indices of the maximum values along `axis` (int32)."""
    original = x
    if axis is None:
        x = tf.reshape(x, [-1])
    result = tf.argmax(x, axis=axis, output_type="int32")
    if keepdims:
        result = _keepdims(original, result, axis)
    return result
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
def argmin(x, axis=None, keepdims=False):
    """Indices of the minimum values along `axis` (int32)."""
    original = x
    if axis is None:
        x = tf.reshape(x, [-1])
    result = tf.argmin(x, axis=axis, output_type="int32")
    if keepdims:
        result = _keepdims(original, result, axis)
    return result
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
def argsort(x, axis=-1):
    """Indices that would sort `x` along `axis`."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "bool":
        # tf.argsort has no bool kernel; sort as small unsigned ints.
        x = tf.cast(x, "uint8")

    if x.shape.rank == 0:
        return tf.cast([0], "int32")

    if axis is None:
        # Sort the flattened tensor.
        return tf.argsort(tf.reshape(x, [-1]), axis=0)
    return tf.argsort(x, axis=axis)
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
def array(x, dtype=None):
    """Convert `x` to a tensor (numpy `array` equivalent)."""
    return convert_to_tensor(x, dtype=dtype)
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
def average(x, axis=None, weights=None):
    """Weighted average along `axis` (numpy `average` semantics)."""
    x = convert_to_tensor(x)

    if weights is None:  # Treat all weights as 1.
        x = tf.cast(x, dtypes.result_type(x.dtype, float))
        return tf.reduce_mean(x, axis=axis)

    weights = convert_to_tensor(weights)
    common = dtypes.result_type(x.dtype, weights.dtype, float)
    x = tf.cast(x, common)
    weights = tf.cast(weights, common)

    def _same_rank():
        # Weights broadcast against `x`: weighted sum normalized per-axis.
        weights_sum = tf.reduce_sum(weights, axis=axis)
        return tf.reduce_sum(x * weights, axis=axis) / weights_sum

    def _1d_weights():
        # 1D weights along `axis`: contract with tensordot.
        weights_sum = tf.reduce_sum(weights)
        axes = tf.convert_to_tensor([[axis], [0]])
        return tf.tensordot(x, weights, axes) / weights_sum

    if axis is None or len(x.shape) == len(weights.shape):
        return _same_rank()
    return _1d_weights()
|
| 861 |
+
|
| 862 |
+
|
| 863 |
+
def bitwise_and(x, y):
    """Element-wise bitwise AND after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    common = dtypes.result_type(x.dtype, y.dtype)
    return tf.bitwise.bitwise_and(tf.cast(x, common), tf.cast(y, common))
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def bitwise_invert(x):
    """Element-wise bitwise NOT."""
    return tf.bitwise.invert(convert_to_tensor(x))
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
def bitwise_not(x):
    """Alias of `bitwise_invert`."""
    return bitwise_invert(x)
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
def bitwise_or(x, y):
    """Element-wise bitwise OR after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    common = dtypes.result_type(x.dtype, y.dtype)
    return tf.bitwise.bitwise_or(tf.cast(x, common), tf.cast(y, common))
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
def bitwise_xor(x, y):
    """Element-wise bitwise XOR after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    common = dtypes.result_type(x.dtype, y.dtype)
    return tf.bitwise.bitwise_xor(tf.cast(x, common), tf.cast(y, common))
|
| 897 |
+
|
| 898 |
+
|
| 899 |
+
def bitwise_left_shift(x, y):
    """Element-wise left shift of `x` by `y` bits."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    common = dtypes.result_type(x.dtype, y.dtype)
    return tf.bitwise.left_shift(tf.cast(x, common), tf.cast(y, common))
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
def left_shift(x, y):
    """Alias of `bitwise_left_shift`."""
    return bitwise_left_shift(x, y)
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
def bitwise_right_shift(x, y):
    """Element-wise right shift of `x` by `y` bits."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    common = dtypes.result_type(x.dtype, y.dtype)
    return tf.bitwise.right_shift(tf.cast(x, common), tf.cast(y, common))
|
| 919 |
+
|
| 920 |
+
|
| 921 |
+
def right_shift(x, y):
    """Alias of `bitwise_right_shift`."""
    return bitwise_right_shift(x, y)
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
def broadcast_to(x, shape):
    """Broadcast `x` to the given shape."""
    return tf.broadcast_to(x, shape)
|
| 927 |
+
|
| 928 |
+
|
| 929 |
+
@sparse.elementwise_unary
def ceil(x):
    """Element-wise ceiling."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.ceil(tf.cast(x, target))
|
| 938 |
+
|
| 939 |
+
|
| 940 |
+
def clip(x, x_min, x_max):
    """Clamp `x` into the interval [x_min, x_max]."""
    # tf.clip_by_value has no bool kernel; compute in int32.
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "int32")
    return tf.clip_by_value(x, x_min, x_max)
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
def concatenate(xs, axis=0):
    """Join a sequence of tensors along `axis`, handling sparse inputs."""
    n_sparse = builtins.sum(isinstance(x, tf.SparseTensor) for x in xs)
    if n_sparse:
        if n_sparse == len(xs):
            # All inputs sparse: the result stays sparse.
            return tf.sparse.concat(axis=axis, sp_inputs=xs)
        # Mixed sparse/dense: densify the sparse entries first.
        xs = [
            convert_to_tensor(x, sparse=False)
            if isinstance(x, tf.SparseTensor)
            else x
            for x in xs
        ]
    xs = tree.map_structure(convert_to_tensor, xs)
    dtype_set = set([x.dtype for x in xs])
    if len(dtype_set) > 1:
        common = dtypes.result_type(*dtype_set)
        xs = tree.map_structure(lambda t: tf.cast(t, common), xs)
    return tf.concat(xs, axis=axis)
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
@sparse.elementwise_unary
def conjugate(x):
    """Element-wise complex conjugate."""
    return tf.math.conj(x)
|
| 972 |
+
|
| 973 |
+
|
| 974 |
+
@sparse.elementwise_unary
def conj(x):
    """Element-wise complex conjugate (alias of `conjugate`)."""
    return tf.math.conj(x)
|
| 977 |
+
|
| 978 |
+
|
| 979 |
+
@sparse.elementwise_unary
def copy(x):
    """Return a copy of `x` as a new tensor."""
    return tf.identity(convert_to_tensor(x))
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
@sparse.densifying_unary(1)
def cos(x):
    """Element-wise cosine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.cos(tf.cast(x, target))
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
@sparse.densifying_unary(1)
def cosh(x):
    """Element-wise hyperbolic cosine."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.cosh(tf.cast(x, target))
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
def count_nonzero(x, axis=None):
    """Count non-zero elements along `axis`, returning int32."""
    return tf.math.count_nonzero(x, axis=axis, dtype="int32")
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Cross product of `x1` and `x2` (numpy `cross` semantics).

    Inputs with last-dim 2 are zero-padded to 3 components; if both inputs
    are 2-component vectors only the scalar z-component is returned.
    `axis`, when given, overrides `axisa`/`axisb`/`axisc`.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)

    if axis is not None:
        axisa = axis
        axisb = axis
        axisc = axis
    # Move the vector-component axes to the end for tf.linalg.cross.
    x1 = moveaxis(x1, axisa, -1)
    x2 = moveaxis(x2, axisb, -1)

    def maybe_pad_zeros(x, size_of_last_dim):
        # Pad a trailing zero component when the last dim is 2, so
        # tf.linalg.cross (which requires 3 components) can be used.
        def pad_zeros(x):
            return tf.pad(
                x,
                tf.concat(
                    [
                        tf.zeros([tf.rank(x) - 1, 2], "int32"),
                        tf.constant([[0, 1]], "int32"),
                    ],
                    axis=0,
                ),
            )

        if isinstance(size_of_last_dim, int):
            # Static shape: decide at trace time.
            if size_of_last_dim == 2:
                return pad_zeros(x)
            return x

        # Dynamic shape: decide at run time.
        return tf.cond(
            tf.equal(size_of_last_dim, 2), lambda: pad_zeros(x), lambda: x
        )

    x1_dim = shape_op(x1)[-1]
    x2_dim = shape_op(x2)[-1]

    x1 = maybe_pad_zeros(x1, x1_dim)
    x2 = maybe_pad_zeros(x2, x2_dim)

    # Broadcast each other
    shape = shape_op(x1)

    shape = tf.broadcast_dynamic_shape(shape, shape_op(x2))
    x1 = tf.broadcast_to(x1, shape)
    x2 = tf.broadcast_to(x2, shape)

    c = tf.linalg.cross(x1, x2)

    if isinstance(x1_dim, int) and isinstance(x2_dim, int):
        # Static shapes: 2-D x 2-D keeps only the z-component.
        if (x1_dim == 2) & (x2_dim == 2):
            return c[..., 2]
        return moveaxis(c, -1, axisc)

    # Dynamic shapes: same choice deferred to run time.
    return tf.cond(
        (x1_dim == 2) & (x2_dim == 2),
        lambda: c[..., 2],
        lambda: moveaxis(c, -1, axisc),
    )
|
| 1072 |
+
|
| 1073 |
+
|
| 1074 |
+
def cumprod(x, axis=None, dtype=None):
    """Cumulative product along `axis`; flattens when `axis` is None."""
    x = convert_to_tensor(x, dtype=dtype)
    if standardize_dtype(x.dtype) == "bool":
        # tf.math.cumprod has no bool kernel.
        x = tf.cast(x, "int32")
    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    return tf.math.cumprod(x, axis=axis)
|
| 1083 |
+
|
| 1084 |
+
|
| 1085 |
+
def cumsum(x, axis=None, dtype=None):
    """Cumulative sum along `axis`; flattens when `axis` is None."""
    x = convert_to_tensor(x, dtype=dtype)
    if standardize_dtype(x.dtype) == "bool":
        # tf.math.cumsum has no bool kernel.
        x = tf.cast(x, "int32")
    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    return tf.math.cumsum(x, axis=axis)
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
def diag(x, k=0):
    """Build a diagonal matrix from 1-D `x`, or extract a diagonal from 2-D.

    `k` selects the diagonal: 0 is the main diagonal, positive above it,
    negative below.
    """
    x = convert_to_tensor(x)
    if len(x.shape) == 1:
        # numpy returns a |k| x |k| all-zero matrix for an empty 1-D input;
        # otherwise place the values on diagonal `k`.
        return tf.cond(
            tf.equal(tf.size(x), 0),
            lambda: tf.zeros([builtins.abs(k), builtins.abs(k)], dtype=x.dtype),
            lambda: tf.linalg.diag(x, k=k),
        )
    elif len(x.shape) == 2:
        return diagonal(x, offset=k)
    else:
        raise ValueError(f"`x` must be 1d or 2d. Received: x.shape={x.shape}")
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
def diagflat(x, k=0):
    """Flatten `x` and build a matrix with it on diagonal `k`."""
    flat = tf.reshape(convert_to_tensor(x), [-1])
    return diag(flat, k)
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
def diagonal(x, offset=0, axis1=0, axis2=1):
    """Return the diagonal of `x` over the plane (axis1, axis2).

    `offset` selects the diagonal relative to the main one, following
    numpy's `diagonal` convention.
    """
    x = convert_to_tensor(x)
    x_rank = x.ndim
    # Fast path: the requested plane already matches tf.linalg.diag_part's
    # expectation (last two axes, main diagonal).
    if (
        offset == 0
        and (axis1 == x_rank - 2 or axis1 == -2)
        and (axis2 == x_rank - 1 or axis2 == -1)
    ):
        return tf.linalg.diag_part(x)

    # Bring the requested plane to the last two axes.
    x = moveaxis(x, (axis1, axis2), (-2, -1))
    x_shape = shape_op(x)

    def _zeros():
        # Empty diagonal: same leading dims, trailing dim of 0.
        return tf.zeros(tf.concat([x_shape[:-1], [0]], 0), dtype=x.dtype)

    # An out-of-range offset yields an empty result; handle statically when
    # shapes are known, otherwise defer to tf.cond at run time.
    if isinstance(x_shape[-1], int) and isinstance(x_shape[-2], int):
        if offset <= -1 * x_shape[-2] or offset >= x_shape[-1]:
            x = _zeros()
    else:
        x = tf.cond(
            tf.logical_or(
                tf.less_equal(offset, -1 * x_shape[-2]),
                tf.greater_equal(offset, x_shape[-1]),
            ),
            lambda: _zeros(),
            lambda: x,
        )
    return tf.linalg.diag_part(x, k=offset)
|
| 1144 |
+
|
| 1145 |
+
|
| 1146 |
+
def diff(a, n=1, axis=-1):
    """n-th discrete difference along `axis` (numpy `diff` semantics)."""
    a = convert_to_tensor(a)
    if n == 0:
        return a
    if n < 0:
        raise ValueError(f"Order `n` must be non-negative. Received n={n}")
    if a.ndim == 0:
        raise ValueError(
            "`diff` requires input that is at least one dimensional. "
            f"Received: a={a}"
        )
    axis = canonicalize_axis(axis, a.ndim)
    upper = [slice(None)] * a.ndim
    lower = [slice(None)] * a.ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    for _ in range(n):
        if standardize_dtype(a.dtype) == "bool":
            # Boolean difference is XOR of adjacent elements.
            a = tf.not_equal(a[upper], a[lower])
        else:
            a = tf.subtract(a[upper], a[lower])
    return a
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
def digitize(x, bins):
    """Return the bin index of each element of `x` given `bins` edges."""
    x = convert_to_tensor(x)
    bins = list(bins)

    # bins must be float type
    bins = tree.map_structure(lambda x: float(x), bins)

    # TODO: tf.raw_ops.Bucketize doesn't support bool, bfloat16, float16, int8
    # int16, uint8, uint16, uint32
    ori_dtype = standardize_dtype(x.dtype)
    if ori_dtype in ("bool", "int8", "int16", "uint8", "uint16"):
        x = cast(x, "int32")
    elif ori_dtype == "uint32":
        # uint32 does not fit in int32; widen instead.
        x = cast(x, "int64")
    elif ori_dtype in ("bfloat16", "float16"):
        x = cast(x, "float32")

    if isinstance(x, tf.RaggedTensor):
        # Bucketize the flat values, preserving the ragged structure.
        return tf.ragged.map_flat_values(
            lambda y: tf.raw_ops.Bucketize(input=y, boundaries=bins), x
        )
    elif isinstance(x, tf.SparseTensor):
        # Bucketize only the stored values, keeping indices/shape intact.
        output = tf.SparseTensor(
            indices=tf.identity(x.indices),
            values=tf.raw_ops.Bucketize(input=x.values, boundaries=bins),
            dense_shape=tf.identity(x.dense_shape),
        )
        output.set_shape(x.shape)
        return output
    return tf.raw_ops.Bucketize(input=x, boundaries=bins)
|
| 1202 |
+
|
| 1203 |
+
|
| 1204 |
+
def dot(x, y):
    """Generalized dot product matching `np.dot` semantics."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    result_dtype = dtypes.result_type(x.dtype, y.dtype)
    # GPU only supports float types
    compute_dtype = dtypes.result_type(result_dtype, float)
    x = tf.cast(x, compute_dtype)
    y = tf.cast(y, compute_dtype)

    if x.shape.rank == 0 or y.shape.rank == 0:
        # Scalar operand: plain multiplication.
        out = x * y
    elif y.shape.rank == 1:
        # Vector on the right: contract the last axes.
        out = tf.tensordot(x, y, axes=[[-1], [-1]])
    else:
        # Matrix (or higher) on the right: contract last with second-to-last.
        out = tf.tensordot(x, y, axes=[[-1], [-2]])
    return tf.cast(out, result_dtype)
|
| 1222 |
+
|
| 1223 |
+
|
| 1224 |
+
def empty(shape, dtype=None):
    """Like `np.empty`, but zero-filled (TF has no uninitialized tensors)."""
    return tf.zeros(shape, dtype=dtype or config.floatx())
|
| 1227 |
+
|
| 1228 |
+
|
| 1229 |
+
def equal(x1, x2):
    """Element-wise equality after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.equal(tf.cast(x1, common), tf.cast(x2, common))
|
| 1236 |
+
|
| 1237 |
+
|
| 1238 |
+
@sparse.densifying_unary(1)
def exp(x):
    """Element-wise natural exponential."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "int" in dtype or dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.exp(x)
|
| 1245 |
+
|
| 1246 |
+
|
| 1247 |
+
@sparse.densifying_unary(1)
def exp2(x):
    """Element-wise base-2 exponential."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "int" in dtype or dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.math.pow(2.0, x)
|
| 1254 |
+
|
| 1255 |
+
|
| 1256 |
+
def expand_dims(x, axis):
    """Insert new axes of length 1 at the positions given by `axis`."""
    x = convert_to_tensor(x)
    axis = to_tuple_or_list(axis)
    # Canonicalize against the *output* rank so negative axes land correctly.
    out_ndim = len(x.shape) + len(axis)
    axis = sorted([canonicalize_axis(a, out_ndim) for a in axis])
    if isinstance(x, tf.SparseTensor):
        from keras.src.ops.operation_utils import (
            compute_expand_dims_output_shape,
        )

        # tf.sparse.expand_dims drops static shape info; restore it after.
        output_shape = compute_expand_dims_output_shape(x.shape, axis)
        for a in axis:
            x = tf.sparse.expand_dims(x, a)
        x.set_shape(output_shape)
        return x
    for a in axis:
        x = tf.expand_dims(x, a)
    return x
|
| 1274 |
+
|
| 1275 |
+
|
| 1276 |
+
@sparse.elementwise_unary
def expm1(x):
    """Element-wise `exp(x) - 1`, accurate near zero."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "int" in dtype or dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.math.expm1(x)
|
| 1283 |
+
|
| 1284 |
+
|
| 1285 |
+
def flip(x, axis=None):
    """Reverse `x` along `axis`, or along every axis when `axis` is None."""
    x = convert_to_tensor(x)
    axes = tf.range(tf.rank(x)) if axis is None else [axis]
    return tf.reverse(x, axes)
|
| 1290 |
+
|
| 1291 |
+
|
| 1292 |
+
@sparse.elementwise_unary
def floor(x):
    """Element-wise floor."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.floor(tf.cast(x, target))
|
| 1302 |
+
|
| 1303 |
+
|
| 1304 |
+
def full(shape, fill_value, dtype=None):
    """Tensor of `shape` filled with `fill_value`."""
    fill = convert_to_tensor(fill_value, dtype or config.floatx())
    return tf.broadcast_to(fill, shape)
|
| 1308 |
+
|
| 1309 |
+
|
| 1310 |
+
def full_like(x, fill_value, dtype=None):
    """Tensor shaped like `x`, filled with `fill_value`."""
    x = convert_to_tensor(x)
    target = dtypes.result_type(dtype or x.dtype)
    fill = convert_to_tensor(fill_value, target)
    return tf.broadcast_to(fill, tf.shape(x))
|
| 1315 |
+
|
| 1316 |
+
|
| 1317 |
+
def greater(x1, x2):
    """Element-wise `x1 > x2` after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.greater(tf.cast(x1, common), tf.cast(x2, common))
|
| 1324 |
+
|
| 1325 |
+
|
| 1326 |
+
def greater_equal(x1, x2):
    """Element-wise `x1 >= x2` after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.greater_equal(tf.cast(x1, common), tf.cast(x2, common))
|
| 1333 |
+
|
| 1334 |
+
|
| 1335 |
+
def hstack(xs):
    """Stack tensors horizontally (axis 1, or axis 0 for 1-D inputs)."""
    dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
    if len(dtype_set) > 1:
        common = dtypes.result_type(*dtype_set)
        xs = tree.map_structure(lambda t: convert_to_tensor(t, common), xs)
    concat_axis = 0 if len(xs[0].shape) == 1 else 1
    return tf.concat(xs, axis=concat_axis)
|
| 1343 |
+
|
| 1344 |
+
|
| 1345 |
+
def identity(n, dtype=None):
    """n-by-n identity matrix."""
    return eye(N=n, M=n, dtype=dtype)
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
@sparse.elementwise_unary
def imag(x):
    """Element-wise imaginary part."""
    return tf.math.imag(x)
|
| 1352 |
+
|
| 1353 |
+
|
| 1354 |
+
def isclose(x1, x2, rtol=1e-5, atol=1e-8, equal_nan=False):
    """Element-wise closeness test (numpy `isclose` semantics)."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, common)
    x2 = tf.cast(x2, common)
    if "float" not in common:
        # Integer/bool inputs: closeness degenerates to exact equality.
        return tf.equal(x1, x2)
    close = tf.abs(x1 - x2) <= (atol + rtol * tf.abs(x2))
    if equal_nan:
        close = close | (is_nan(x1) & is_nan(x2))
    return close
|
| 1367 |
+
|
| 1368 |
+
|
| 1369 |
+
@sparse.densifying_unary(True)
def isfinite(x):
    """Element-wise finiteness test; non-float dtypes are always finite."""
    x = convert_to_tensor(x)
    dt = tf.as_dtype(x.dtype)
    if dt.is_integer or not dt.is_numeric:
        return tf.ones(x.shape, tf.bool)
    return tf.math.is_finite(x)
|
| 1376 |
+
|
| 1377 |
+
|
| 1378 |
+
def isinf(x):
    """Element-wise infinity test; non-float dtypes are never infinite."""
    x = convert_to_tensor(x)
    dt = tf.as_dtype(x.dtype)
    if dt.is_integer or not dt.is_numeric:
        return tf.zeros(x.shape, tf.bool)
    return tf.math.is_inf(x)
|
| 1384 |
+
|
| 1385 |
+
|
| 1386 |
+
def isnan(x):
    """Element-wise NaN test; non-float dtypes are never NaN."""
    x = convert_to_tensor(x)
    dt = tf.as_dtype(x.dtype)
    if dt.is_integer or not dt.is_numeric:
        return tf.zeros(x.shape, tf.bool)
    return tf.math.is_nan(x)
|
| 1392 |
+
|
| 1393 |
+
|
| 1394 |
+
def less(x1, x2):
    """Element-wise `x1 < x2` after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.less(tf.cast(x1, common), tf.cast(x2, common))
|
| 1401 |
+
|
| 1402 |
+
|
| 1403 |
+
def less_equal(x1, x2):
    """Element-wise `x1 <= x2` after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.less_equal(tf.cast(x1, common), tf.cast(x2, common))
|
| 1410 |
+
|
| 1411 |
+
|
| 1412 |
+
def linspace(
    start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
    """`num` evenly spaced samples from `start` to `stop` (numpy semantics).

    When `retstep` is true, also return the spacing between samples (NaN
    when it is undefined, e.g. num <= 1 with endpoint=True).
    """
    if num < 0:
        raise ValueError(
            f"`num` must be a non-negative integer. Received: num={num}"
        )
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    else:
        dtype = standardize_dtype(dtype)
    start = convert_to_tensor(start, dtype=dtype)
    stop = convert_to_tensor(stop, dtype=dtype)
    # NaN is the sentinel for "step undefined"; overwritten below when
    # a meaningful step exists.
    step = convert_to_tensor(np.nan)
    if endpoint:
        result = tf.linspace(start, stop, num, axis=axis)
        if num > 1:
            step = (stop - start) / (tf.cast(num, dtype) - 1)
    else:
        # tf.linspace doesn't support endpoint=False, so we manually handle it
        if num > 0:
            step = (stop - start) / tf.cast(num, dtype)
        if num > 1:
            # Shrink the interval by one step, then sample inclusively.
            new_stop = tf.cast(stop, step.dtype) - step
            start = tf.cast(start, new_stop.dtype)
            result = tf.linspace(start, new_stop, num, axis=axis)
        else:
            result = tf.linspace(start, stop, num, axis=axis)
    if dtype is not None:
        # Integer outputs truncate toward -inf before the cast.
        if "int" in dtype:
            result = tf.floor(result)
        result = tf.cast(result, dtype)
    if retstep:
        return (result, step)
    else:
        return result
|
| 1453 |
+
|
| 1454 |
+
|
| 1455 |
+
@sparse.densifying_unary(-np.inf)
def log(x):
    """Element-wise natural logarithm."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.log(tf.cast(x, target))
|
| 1465 |
+
|
| 1466 |
+
|
| 1467 |
+
@sparse.densifying_unary(-np.inf)
def log10(x):
    """Element-wise base-10 logarithm."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, target)
    # TF has no log10 op; use the change-of-base identity.
    return tf.math.log(x) / tf.math.log(tf.constant(10, x.dtype))
|
| 1477 |
+
|
| 1478 |
+
|
| 1479 |
+
@sparse.elementwise_unary
def log1p(x):
    """Element-wise `log(1 + x)`, accurate near zero."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.log1p(tf.cast(x, target))
|
| 1489 |
+
|
| 1490 |
+
|
| 1491 |
+
@sparse.densifying_unary(-np.inf)
def log2(x):
    """Element-wise base-2 logarithm."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, target)
    # TF has no log2 op; use the change-of-base identity.
    return tf.math.log(x) / tf.math.log(tf.constant(2, x.dtype))
|
| 1501 |
+
|
| 1502 |
+
|
| 1503 |
+
def logaddexp(x1, x2):
    """`log(exp(x1) + exp(x2))` computed without overflow."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = tf.cast(x1, common)
    x2 = tf.cast(x2, common)
    delta = x1 - x2
    # A NaN delta (e.g. inf - inf) falls back to x1 + x2 to propagate NaN/inf.
    stable = tf.maximum(x1, x2) + tf.math.log1p(tf.math.exp(-tf.abs(delta)))
    return tf.where(tf.math.is_nan(delta), x1 + x2, stable)
|
| 1515 |
+
|
| 1516 |
+
|
| 1517 |
+
def logical_and(x1, x2):
|
| 1518 |
+
x1 = tf.cast(x1, "bool")
|
| 1519 |
+
x2 = tf.cast(x2, "bool")
|
| 1520 |
+
return tf.logical_and(x1, x2)
|
| 1521 |
+
|
| 1522 |
+
|
| 1523 |
+
def logical_not(x):
|
| 1524 |
+
x = tf.cast(x, "bool")
|
| 1525 |
+
return tf.logical_not(x)
|
| 1526 |
+
|
| 1527 |
+
|
| 1528 |
+
def logical_or(x1, x2):
|
| 1529 |
+
x1 = tf.cast(x1, "bool")
|
| 1530 |
+
x2 = tf.cast(x2, "bool")
|
| 1531 |
+
return tf.logical_or(x1, x2)
|
| 1532 |
+
|
| 1533 |
+
|
| 1534 |
+
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
    """Numbers evenly spaced on a log scale: base ** linspace(start, stop)."""
    exponents = linspace(
        start=start,
        stop=stop,
        num=num,
        endpoint=endpoint,
        dtype=dtype,
        axis=axis,
    )
    return tf.pow(tf.cast(base, exponents.dtype), exponents)


@sparse.elementwise_binary_union(tf.sparse.maximum, densify_mixed=True)
def maximum(x1, x2):
    """Elementwise maximum with numpy-style dtype promotion."""
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    # Python scalars contribute their Python type to the promotion rule.
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.maximum(
        convert_to_tensor(x1, dtype), convert_to_tensor(x2, dtype)
    )


def median(x, axis=None, keepdims=False):
    """Median, implemented as the 0.5 quantile."""
    return quantile(x, 0.5, axis=axis, keepdims=keepdims)


def meshgrid(*x, indexing="xy"):
    """Coordinate matrices from coordinate vectors (np.meshgrid semantics)."""
    return tf.meshgrid(*x, indexing=indexing)
|
| 1567 |
+
|
| 1568 |
+
|
| 1569 |
+
def min(x, axis=None, keepdims=False, initial=None):
    """Minimum of tensor elements, numpy-compatible.

    Handles `initial` manually (unsupported by the TF numpy API) and, in
    eager mode, raises on empty inputs instead of returning inf.
    """
    x = convert_to_tensor(x)

    # The TensorFlow numpy API implementation doesn't support `initial` so we
    # handle it manually here.
    if initial is not None:
        if standardize_dtype(x.dtype) == "bool":
            # Bool min is all(); combine with `initial` in int space.
            reduced = tf.reduce_all(x, axis=axis, keepdims=keepdims)
            combined = tf.math.minimum(
                tf.cast(reduced, "int32"), tf.cast(initial, "int32")
            )
            return tf.cast(combined, "bool")
        reduced = tf.reduce_min(x, axis=axis, keepdims=keepdims)
        return tf.math.minimum(reduced, initial)

    # TensorFlow returns inf by default for an empty list, but for consistency
    # with other backends and the numpy API we want to throw in this case.
    if tf.executing_eagerly():
        size_x = size(x)
        tf.assert_greater(
            size_x,
            tf.constant(0, dtype=size_x.dtype),
            message="Cannot compute the min of an empty tensor.",
        )

    if standardize_dtype(x.dtype) == "bool":
        return tf.reduce_all(x, axis=axis, keepdims=keepdims)
    return tf.reduce_min(x, axis=axis, keepdims=keepdims)
|
| 1597 |
+
|
| 1598 |
+
|
| 1599 |
+
@sparse.elementwise_binary_union(tf.sparse.minimum, densify_mixed=True)
def minimum(x1, x2):
    """Elementwise minimum with numpy-style dtype promotion."""
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    # Python scalars contribute their Python type to the promotion rule.
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.minimum(
        convert_to_tensor(x1, dtype), convert_to_tensor(x2, dtype)
    )


def mod(x1, x2):
    """Elementwise remainder; bool operands are promoted to int32."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    if dtype == "bool":
        # tf.math.mod has no bool kernel.
        dtype = "int32"
    return tf.math.mod(tf.cast(x1, dtype), tf.cast(x2, dtype))
|
| 1623 |
+
|
| 1624 |
+
|
| 1625 |
+
def moveaxis(x, source, destination):
    """Move tensor axes to new positions (np.moveaxis semantics)."""
    x = convert_to_tensor(x)

    src = to_tuple_or_list(source)
    dst = to_tuple_or_list(destination)
    src = tuple(canonicalize_axis(i, x.ndim) for i in src)
    dst = tuple(canonicalize_axis(i, x.ndim) for i in dst)
    if len(src) != len(dst):
        raise ValueError(
            "Inconsistent number of `source` and `destination`. "
            f"Received: source={source}, destination={destination}"
        )
    # Directly return x if no movement is required
    if src == dst:
        return x
    # Start from the axes that stay put, then insert the moved ones at their
    # destination slots in ascending order.
    perm = [i for i in range(x.ndim) if i not in src]
    for d, s in sorted(zip(dst, src)):
        perm.insert(d, s)
    return tf.transpose(x, perm)
|
| 1644 |
+
|
| 1645 |
+
|
| 1646 |
+
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
    """Replace NaN/inf entries with finite values (np.nan_to_num semantics)."""
    x = convert_to_tensor(x)

    dtype = x.dtype
    dtype_as_dtype = tf.as_dtype(dtype)
    # Integer and non-numeric tensors cannot hold NaN/inf; return unchanged.
    if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric:
        return x

    # NaN -> `nan`.
    x = tf.where(tf.math.is_nan(x), tf.constant(nan, dtype), x)

    # +inf -> `posinf`, defaulting to the dtype's largest finite value.
    if posinf is None:
        posinf = dtype.max
    x = tf.where(tf.math.is_inf(x) & (x > 0), tf.constant(posinf, dtype), x)

    # -inf -> `neginf`, defaulting to the dtype's smallest finite value.
    if neginf is None:
        neginf = dtype.min
    x = tf.where(tf.math.is_inf(x) & (x < 0), tf.constant(neginf, dtype), x)

    return x
|
| 1668 |
+
|
| 1669 |
+
|
| 1670 |
+
def ndim(x):
    """Number of tensor dimensions."""
    return convert_to_tensor(x).ndim


def nonzero(x):
    """Indices of nonzero elements, one int32 vector per dimension."""
    x = convert_to_tensor(x)
    per_dim = tf.unstack(tf.where(tf.cast(x, "bool")), x.shape.rank, axis=1)
    return tree.map_structure(lambda idx: tf.cast(idx, "int32"), per_dim)


def not_equal(x1, x2):
    """Elementwise x1 != x2 after dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    return tf.not_equal(tf.cast(x1, dtype), tf.cast(x2, dtype))


def ones_like(x, dtype=None):
    """Tensor of ones with the shape (and by default, dtype) of `x`."""
    return tf.ones_like(x, dtype=dtype)


def zeros_like(x, dtype=None):
    """Tensor of zeros with the shape (and by default, dtype) of `x`."""
    return tf.zeros_like(x, dtype=dtype)


def outer(x1, x2):
    """Outer product of the flattened inputs."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    # Column vector times row vector broadcasts to the outer product.
    return tf.reshape(x1, [-1, 1]) * tf.reshape(x2, [-1])
|
| 1705 |
+
|
| 1706 |
+
|
| 1707 |
+
def pad(x, pad_width, mode="constant", constant_values=None):
    """Pad a tensor (np.pad); `constant_values` is only valid in constant mode."""
    x = convert_to_tensor(x)
    kwargs = {}
    if constant_values is not None:
        if mode != "constant":
            raise ValueError(
                "Argument `constant_values` can only be "
                "provided when `mode == 'constant'`. "
                f"Received: mode={mode}"
            )
        kwargs["constant_values"] = constant_values
    pad_width = convert_to_tensor(pad_width, "int32")
    # tf.pad expects the mode name in upper case ("CONSTANT", "REFLECT", ...).
    return tf.pad(x, pad_width, mode.upper(), **kwargs)


def prod(x, axis=None, keepdims=False, dtype=None):
    """Product of elements; narrow int dtypes are widened like numpy does."""
    x = convert_to_tensor(x)
    if dtype is None:
        dtype = dtypes.result_type(x.dtype)
        # Widen small accumulator dtypes to match numpy's behavior.
        if dtype in ("bool", "int8", "int16"):
            dtype = "int32"
        elif dtype in ("uint8", "uint16"):
            dtype = "uint32"
    return tf.reduce_prod(tf.cast(x, dtype), axis=axis, keepdims=keepdims)
|
| 1734 |
+
|
| 1735 |
+
|
| 1736 |
+
def _quantile(x, q, axis=None, method="linear", keepdims=False):
    """Compute quantiles `q` of `x` along `axis`.

    Port of tfp.stats.percentile. `axis` must already be a tuple/list or
    None (callers normalize it via `to_tuple_or_list`).
    """
    # ref: tfp.stats.percentile
    # float64 is needed here and below, else we get the wrong index if the array
    # is huge along axis.
    q = tf.cast(q, "float64")

    # Move `axis` dims of `x` to the rightmost, call it `y`.
    if axis is None:
        y = tf.reshape(x, [-1])
    else:
        x_ndims = len(x.shape)
        # _make_static_axis_non_negative_list
        axis = [canonicalize_axis(a, x_ndims) for a in axis]

        # _move_dims_to_flat_end
        other_dims = sorted(set(range(x_ndims)).difference(axis))
        perm = other_dims + list(axis)
        x_permed = tf.transpose(a=x, perm=perm)
        if None not in x.shape:
            # Fully static shape: compute the flattened-end shape in Python.
            x_shape = list(x.shape)
            other_shape = [x_shape[i] for i in other_dims]
            end_shape = [math.prod([x_shape[i] for i in axis])]
            full_shape = other_shape + end_shape
        else:
            # Dynamic shape: let reshape infer the flattened-end size via -1.
            other_shape = tf.gather(tf.shape(x), tf.cast(other_dims, tf.int64))
            full_shape = tf.concat([other_shape, [-1]], axis=0)
        y = tf.reshape(x_permed, shape=full_shape)

    # Sort (in ascending order) everything which allows multiple calls to sort
    # only once (under the hood) and use CSE.
    sorted_y = tf.sort(y, axis=-1, direction="ASCENDING")

    # Number of samples along the reduced (last) axis, as float64.
    d = tf.cast(tf.shape(y)[-1], "float64")

    def _get_indices(method):
        """Get values of y at the indices implied by method."""
        if method == "lower":
            indices = tf.math.floor((d - 1) * q)
        elif method == "higher":
            indices = tf.math.ceil((d - 1) * q)
        elif method == "nearest":
            indices = tf.round((d - 1) * q)
        # d - 1 will be distinct from d in int32, but not necessarily double.
        # So clip to avoid out of bounds errors.
        return tf.clip_by_value(
            tf.cast(indices, "int32"), 0, tf.shape(y)[-1] - 1
        )

    if method in ["nearest", "lower", "higher"]:
        gathered_y = tf.gather(sorted_y, _get_indices(method), axis=-1)
    elif method == "midpoint":
        gathered_y = 0.5 * (
            tf.gather(sorted_y, _get_indices("lower"), axis=-1)
            + tf.gather(sorted_y, _get_indices("higher"), axis=-1)
        )
    elif method == "linear":
        larger_y_idx = _get_indices("higher")
        exact_idx = (d - 1) * q
        # preserve_gradients
        smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)
        larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(y)[-1] - 1)
        fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx
        fraction = tf.cast(fraction, y.dtype)
        # Linear interpolation between the two neighboring order statistics.
        gathered_y = (
            tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction)
            + tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction
        )

    # Propagate NaNs
    if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64):
        # Apparently tf.is_nan doesn't like other dtypes
        nan_batch_members = tf.reduce_any(tf.math.is_nan(x), axis=axis)
        # Append rank(q) size-1 dims so the NaN mask broadcasts against the
        # quantile dimension(s) of gathered_y.
        right_rank_matched_shape = tf.pad(
            tf.shape(nan_batch_members),
            paddings=[[0, tf.rank(q)]],
            constant_values=1,
        )
        nan_batch_members = tf.reshape(
            nan_batch_members, shape=right_rank_matched_shape
        )
        nan_value = tf.constant(float("NaN"), dtype=x.dtype)
        gathered_y = tf.where(nan_batch_members, nan_value, gathered_y)

    # Expand dimensions if requested
    if keepdims:
        if axis is None:
            ones_vec = tf.ones(shape=[tf.rank(x) + tf.rank(q)], dtype="int32")
            gathered_y *= tf.ones(ones_vec, dtype=gathered_y.dtype)
        else:
            for i in sorted(axis):
                gathered_y = tf.expand_dims(gathered_y, axis=i)

    # rotate_transpose: move the trailing quantile dims to the front so the
    # output is shaped [q..., batch...].
    shift_value_static = tf.get_static_value(tf.rank(q))
    ndims = tf.TensorShape(gathered_y.shape).rank
    if ndims < 2:
        return gathered_y
    shift_value_static = int(
        math.copysign(1, shift_value_static)
        * (builtins.abs(shift_value_static) % ndims)
    )
    if shift_value_static == 0:
        return gathered_y
    perm = collections.deque(range(ndims))
    perm.rotate(shift_value_static)
    return tf.transpose(a=gathered_y, perm=perm)
|
| 1842 |
+
|
| 1843 |
+
|
| 1844 |
+
def quantile(x, q, axis=None, method="linear", keepdims=False):
    """Numpy-style quantile: promote `x` to float and delegate to _quantile."""
    x = convert_to_tensor(x)
    q = convert_to_tensor(q)
    axis = to_tuple_or_list(axis)
    x = tf.cast(x, dtypes.result_type(x.dtype, float))
    return _quantile(x, q, axis=axis, method=method, keepdims=keepdims)


def ravel(x):
    """Flatten a tensor to 1-D."""
    return tf.reshape(convert_to_tensor(x), [-1])
|
| 1856 |
+
|
| 1857 |
+
|
| 1858 |
+
def unravel_index(x, shape):
    """Convert flat indices into a tuple of coordinate tensors.

    Numpy-compatible `np.unravel_index`.

    Args:
        x: Tensor of flat indices.
        shape: Fully static shape (no `None` entries) to unravel into.

    Returns:
        Tuple of coordinate tensors, one per dimension of `shape`.

    Raises:
        ValueError: If `shape` contains `None`.
    """
    x = tf.convert_to_tensor(x)
    input_dtype = x.dtype

    if None in shape:
        # Bug fix: this message was missing the f-prefix, so it printed the
        # literal text "{shape}" instead of the actual shape value.
        raise ValueError(
            f"`shape` argument cannot contain `None`. Received: shape={shape}"
        )

    if x.ndim == 1:
        coords = []
        # Peel off dimensions from last to first: x % dim is the coordinate
        # along `dim`, then x // dim removes that dimension's contribution.
        for dim in reversed(shape):
            coords.append(tf.cast(x % dim, input_dtype))
            x = x // dim
        return tuple(reversed(coords))

    x_shape = x.shape
    coords = []
    # Bug fix: dimensions must be processed last-to-first, exactly like the
    # 1-D branch above and np.unravel_index. Iterating `shape` forward
    # computed each coordinate against the wrong dimension size (e.g. for
    # shape (2, 3), flat index 4 yielded (2, 0) instead of (1, 1)).
    for dim in reversed(shape):
        coords.append(tf.reshape(tf.cast(x % dim, input_dtype), x_shape))
        x = x // dim

    return tuple(reversed(coords))
|
| 1881 |
+
|
| 1882 |
+
|
| 1883 |
+
@sparse.elementwise_unary
def real(x):
    """Real part of a (possibly complex) tensor."""
    return tf.math.real(convert_to_tensor(x))


@sparse.densifying_unary(np.inf)
def reciprocal(x):
    """Elementwise 1 / x."""
    return tf.math.reciprocal(convert_to_tensor(x))


def repeat(x, repeats, axis=None):
    """Repeat elements of a tensor (np.repeat semantics)."""
    x = convert_to_tensor(x)
    # TODO: tf.repeat doesn't support uint16
    if standardize_dtype(x.dtype) == "uint16":
        widened = tf.cast(x, "uint32")
        return tf.cast(tf.repeat(widened, repeats, axis=axis), "uint16")
    return tf.repeat(x, repeats, axis=axis)
|
| 1902 |
+
|
| 1903 |
+
|
| 1904 |
+
def reshape(x, newshape):
    """Reshape a tensor, with special handling for SparseTensor inputs."""
    x = convert_to_tensor(x)
    if isinstance(x, tf.SparseTensor):
        from keras.src.ops.operation_utils import compute_reshape_output_shape

        # tf.sparse.reshape drops static shape info; restore it explicitly.
        output_shape = compute_reshape_output_shape(
            x.shape, newshape, "newshape"
        )
        output = tf.sparse.reshape(x, newshape)
        output.set_shape(output_shape)
        return output
    return tf.reshape(x, newshape)


def roll(x, shift, axis=None):
    """Roll tensor elements along `axis`, or flattened when axis is None."""
    x = convert_to_tensor(x)
    if axis is not None:
        return tf.roll(x, shift=shift, axis=axis)

    # If axis is None, the roll happens as a 1-d tensor.
    original_shape = tf.shape(x)
    rolled = tf.roll(tf.reshape(x, [-1]), shift, 0)
    return tf.reshape(rolled, original_shape)
|
| 1927 |
+
|
| 1928 |
+
|
| 1929 |
+
def searchsorted(sorted_sequence, values, side="left"):
    """Find insertion indices of `values` into a 1-D sorted sequence."""
    if ndim(sorted_sequence) != 1:
        raise ValueError(
            "`searchsorted` only supports 1-D sorted sequences. "
            "You can use `keras.ops.vectorized_map` "
            "to extend it to N-D sequences. Received: "
            f"sorted_sequence.shape={sorted_sequence.shape}"
        )
    # Use int32 indices unless the sequence is too long to address with them.
    out_type = (
        "int32" if len(sorted_sequence) <= np.iinfo(np.int32).max else "int64"
    )
    return tf.searchsorted(
        sorted_sequence, values, side=side, out_type=out_type
    )


@sparse.elementwise_unary
def sign(x):
    """Elementwise sign; unsigned int dtypes are routed through int32."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    # TODO: tf.sign doesn't support uint8, uint16, uint32
    if ori_dtype in ("uint8", "uint16", "uint32"):
        return tf.cast(tf.sign(tf.cast(x, "int32")), ori_dtype)
    return tf.sign(x)
|
| 1954 |
+
|
| 1955 |
+
|
| 1956 |
+
@sparse.elementwise_unary
def sin(x):
    """Elementwise sine, upcasting integers to a float dtype."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.sin(tf.cast(x, dtype))


@sparse.elementwise_unary
def sinh(x):
    """Elementwise hyperbolic sine, upcasting integers to a float dtype."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.sinh(tf.cast(x, dtype))


def size(x):
    """Total number of elements in the tensor."""
    return tf.size(convert_to_tensor(x))
|
| 1981 |
+
|
| 1982 |
+
|
| 1983 |
+
def sort(x, axis=-1):
    """Sort along an axis; bool tensors are sorted via an int8 view."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    # TODO: tf.sort doesn't support bool
    if ori_dtype == "bool":
        return tf.cast(tf.sort(tf.cast(x, "int8"), axis=axis), ori_dtype)
    return tf.sort(x, axis=axis)


def split(x, indices_or_sections, axis=0):
    """Split a tensor (np.split semantics for both an int and an index list)."""
    if not isinstance(indices_or_sections, int):
        # `tf.split` requires `num_or_size_splits`, so we need to convert
        # `indices_or_sections` to the appropriate format.
        total_size = x.shape[axis]
        indices_or_sections = convert_to_tensor(indices_or_sections)
        # Split sizes are: first index, gaps between indices, remainder.
        start_size = indices_or_sections[0:1]
        end_size = total_size - indices_or_sections[-1:]
        num_or_size_splits = tf.concat(
            [start_size, diff(indices_or_sections), end_size], axis=0
        )
    else:
        num_or_size_splits = indices_or_sections
    return tf.split(x, num_or_size_splits, axis=axis)
|
| 2007 |
+
|
| 2008 |
+
|
| 2009 |
+
def stack(x, axis=0):
    """Stack tensors along a new axis, promoting to a common dtype."""
    dtype_set = {getattr(a, "dtype", type(a)) for a in x}
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        x = tree.map_structure(lambda a: convert_to_tensor(a, dtype), x)
    return tf.stack(x, axis=axis)


def std(x, axis=None, keepdims=False):
    """Standard deviation; int/bool inputs are computed in floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.math.reduce_std(x, axis=axis, keepdims=keepdims)
|
| 2023 |
+
|
| 2024 |
+
|
| 2025 |
+
def swapaxes(x, axis1, axis2):
    """Interchange two axes of a tensor."""
    x = convert_to_tensor(x)

    if (
        x.shape.rank is not None
        and isinstance(axis1, int)
        and isinstance(axis2, int)
    ):
        # This branch makes sure `perm` is statically known, to avoid a
        # not-compile-time-constant XLA error.
        axis1 = canonicalize_axis(axis1, x.ndim)
        axis2 = canonicalize_axis(axis2, x.ndim)

        # Directly return x if no movement is required
        if axis1 == axis2:
            return x

        perm = list(range(x.ndim))
        perm[axis1], perm[axis2] = axis2, axis1
    else:
        # Dynamic rank/axes: normalize negative axes and build `perm` as a
        # tensor with the two positions swapped.
        x_rank = tf.rank(x)
        axis1 = tf.where(axis1 < 0, tf.add(axis1, x_rank), axis1)
        axis2 = tf.where(axis2 < 0, tf.add(axis2, x_rank), axis2)
        perm = tf.tensor_scatter_nd_update(
            tf.range(x_rank), [[axis1], [axis2]], [axis2, axis1]
        )
    return tf.transpose(x, perm)
|
| 2054 |
+
|
| 2055 |
+
|
| 2056 |
+
def take(x, indices, axis=None):
    """Take elements along an axis (np.take), with sparse `indices` support."""
    if isinstance(indices, tf.SparseTensor):
        # Sparse gather is only supported for float weights with axis None/0;
        # every other case densifies `indices` and recurses.
        if x.dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16):
            warnings.warn(
                "`take` with the TensorFlow backend does not support "
                f"`x.dtype={x.dtype}` when `indices` is a sparse tensor; "
                "densifying `indices`."
            )
            return take(x, convert_to_tensor(indices, sparse=False), axis=axis)
        if axis is None:
            x = tf.reshape(x, (-1,))
        elif axis != 0:
            warnings.warn(
                "`take` with the TensorFlow backend does not support "
                f"`axis={axis}` when `indices` is a sparse tensor; "
                "densifying `indices`."
            )
            return take(x, convert_to_tensor(indices, sparse=False), axis=axis)
        # Gather rows via the sparse embedding lookup (missing ids default
        # to row 0), then restore the static shape lost by the lookup.
        output = tf.nn.safe_embedding_lookup_sparse(
            embedding_weights=tf.convert_to_tensor(x),
            sparse_ids=tf.sparse.expand_dims(indices, axis=-1),
            default_id=0,
        )
        output.set_shape(indices.shape + output.shape[len(indices.shape) :])
        return output

    x = convert_to_tensor(x)
    indices = convert_to_tensor(indices)
    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    # Correct the indices using "fill" mode which is the same as in jax
    indices = tf.where(
        indices < 0,
        indices + tf.cast(tf.shape(x)[axis], indices.dtype),
        indices,
    )
    return tf.gather(x, indices, axis=axis)
|
| 2094 |
+
|
| 2095 |
+
|
| 2096 |
+
def take_along_axis(x, indices, axis=None):
    """Gather values from `x` using per-position `indices` along `axis`.

    Numpy-compatible `np.take_along_axis`: `x` and `indices` are broadcast
    against each other on every axis except `axis`.
    """
    from keras.src.ops.operation_utils import (
        compute_take_along_axis_output_shape,
    )

    x = convert_to_tensor(x)
    indices = convert_to_tensor(indices, "int64")
    if axis is None:
        if indices.ndim != 1:
            raise ValueError(
                "`indices` must be 1D if axis=None. "
                f"Received: indices.shape={indices.shape}"
            )
        # axis=None means operate on the flattened input.
        return take_along_axis(tf.reshape(x, [-1]), indices, 0)

    # Compute the static output shape as later on, all shapes manipulations
    # use dynamic shapes.
    static_output_shape = compute_take_along_axis_output_shape(
        x.shape, indices.shape, axis
    )
    rank = x.ndim
    static_axis = axis
    axis = axis + rank if axis < 0 else axis

    # Broadcast shapes to match, ensure that the axis of interest is not
    # broadcast.
    x_shape_original = tf.shape(x, out_type=indices.dtype)
    indices_shape_original = tf.shape(indices, out_type=indices.dtype)
    # Temporarily set the gather axis to 1 on both shapes so broadcasting
    # only applies to the other axes.
    x_shape = tf.tensor_scatter_nd_update(x_shape_original, [[axis]], [1])
    indices_shape = tf.tensor_scatter_nd_update(
        indices_shape_original, [[axis]], [1]
    )
    broadcasted_shape = tf.broadcast_dynamic_shape(x_shape, indices_shape)
    # Restore each tensor's own size along the gather axis.
    x_shape = tf.tensor_scatter_nd_update(
        broadcasted_shape, [[axis]], [x_shape_original[axis]]
    )
    indices_shape = tf.tensor_scatter_nd_update(
        broadcasted_shape, [[axis]], [indices_shape_original[axis]]
    )
    x = tf.broadcast_to(x, x_shape)
    indices = tf.broadcast_to(indices, indices_shape)

    # Correct the indices using "fill" mode which is the same as in jax
    indices = tf.where(indices < 0, indices + x_shape[static_axis], indices)

    # Move the gather axis to the end so each "row" picks from its own slice.
    x = swapaxes(x, static_axis, -1)
    indices = swapaxes(indices, static_axis, -1)

    # Flatten all leading (batch) dims, gather with batch_dims=1, then undo
    # the reshape/swap.
    x_shape = tf.shape(x)
    x = tf.reshape(x, [-1, x_shape[-1]])
    indices_shape = tf.shape(indices)
    indices = tf.reshape(indices, [-1, indices_shape[-1]])

    result = tf.gather(x, indices, batch_dims=1)
    result = tf.reshape(result, indices_shape)
    result = swapaxes(result, static_axis, -1)
    result.set_shape(static_output_shape)
    return result
|
| 2154 |
+
|
| 2155 |
+
|
| 2156 |
+
@sparse.elementwise_unary
def tan(x):
    """Elementwise tangent, upcasting integers to a float dtype."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.tan(tf.cast(x, dtype))


@sparse.elementwise_unary
def tanh(x):
    """Elementwise hyperbolic tangent, upcasting integers to a float dtype."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.tanh(tf.cast(x, dtype))


def tensordot(x1, x2, axes=2):
    """Tensor contraction with numpy dtype promotion."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
    # TODO: tf.tensordot only supports float types
    compute_dtype = dtypes.result_type(result_dtype, float)
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)
    return tf.cast(tf.tensordot(x1, x2, axes=axes), dtype=result_dtype)
|
| 2187 |
+
|
| 2188 |
+
|
| 2189 |
+
@sparse.elementwise_unary
def round(x, decimals=0):
    """Round to the given number of decimals (np.round semantics)."""
    if decimals == 0:
        return tf.round(x)
    x_dtype = x.dtype
    if tf.as_dtype(x_dtype).is_integer:
        # Ints are already exact at any positive decimal precision.
        if decimals > 0:
            return x
        # Negative decimals: round in float space, cast back at the end.
        factor = tf.cast(math.pow(10, decimals), config.floatx())
        x = tf.cast(x, config.floatx())
    else:
        factor = tf.cast(math.pow(10, decimals), x.dtype)
    # Scale, round to nearest integer, then unscale.
    scaled = tf.round(tf.multiply(x, factor))
    return tf.cast(tf.divide(scaled, factor), x_dtype)
|
| 2208 |
+
|
| 2209 |
+
|
| 2210 |
+
def tile(x, repeats):
    """Tile a tensor (np.tile), aligning the ranks of `x` and `repeats`."""
    x = convert_to_tensor(x)
    repeats = tf.reshape(convert_to_tensor(repeats, dtype="int32"), [-1])
    repeats_size = tf.size(repeats)
    # Left-pad `repeats` with 1s so its length is at least x's rank.
    repeats = tf.pad(
        repeats,
        [[tf.maximum(x.shape.rank - repeats_size, 0), 0]],
        constant_values=1,
    )
    # Left-pad x's shape with 1s so its rank is at least len(repeats).
    x_shape = tf.pad(
        tf.shape(x),
        [[tf.maximum(repeats_size - x.shape.rank, 0), 0]],
        constant_values=1,
    )
    x = tf.reshape(x, x_shape)
    return tf.tile(x, repeats)


def trace(x, offset=0, axis1=0, axis2=1):
    """Sum along the `offset` diagonal of the (axis1, axis2) planes."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if dtype not in ("int64", "uint32", "uint64"):
        dtype = dtypes.result_type(dtype, "int32")
    x_shape = tf.shape(x)
    x = moveaxis(x, (axis1, axis2), (-2, -1))
    # Mask out the diagonal and reduce.
    x = tf.where(
        eye(x_shape[axis1], x_shape[axis2], k=offset, dtype="bool"),
        x,
        tf.zeros_like(x),
    )
    # The output dtype is set to "int32" if the input dtype is "bool"
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "int32")
    return tf.cast(tf.reduce_sum(x, axis=(-2, -1)), dtype)
|
| 2245 |
+
|
| 2246 |
+
|
| 2247 |
+
def tri(N, M=None, k=0, dtype=None):
    """N x M matrix of ones at and below the k-th diagonal, zeros above."""
    M = N if M is None else M
    dtype = standardize_dtype(dtype or config.floatx())
    if k < 0:
        lower = -k - 1
        if lower > N:
            # The diagonal lies entirely below the matrix: all zeros.
            r = tf.zeros([N, M], dtype=dtype)
        else:
            # Keep everything NOT in the band above diagonal -(lower).
            o = tf.ones([N, M], dtype="bool")
            r = tf.cast(
                tf.logical_not(tf.linalg.band_part(o, lower, -1)), dtype=dtype
            )
    else:
        o = tf.ones([N, M], dtype=dtype)
        if k > M:
            # The diagonal lies entirely to the right: all ones.
            r = o
        else:
            r = tf.linalg.band_part(o, -1, k)
    return r


def tril(x, k=0):
    """Lower triangle of a tensor: entries at and below diagonal k."""
    x = convert_to_tensor(x)

    def _negative_k_branch():
        # band_part cannot express a negative sub-diagonal count directly,
        # so build an explicit row/column index mask.
        shape = tf.shape(x)
        rows, cols = shape[-2], shape[-1]
        i, j = tf.meshgrid(tf.range(rows), tf.range(cols), indexing="ij")
        mask = i >= j - k
        return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x))

    return tf.cond(
        k >= 0, lambda: tf.linalg.band_part(x, -1, k), _negative_k_branch
    )


def triu(x, k=0):
    """Upper triangle of a tensor: entries at and above diagonal k."""
    x = convert_to_tensor(x)

    def _positive_k_branch():
        # band_part cannot express a positive super-diagonal offset directly,
        # so build an explicit row/column index mask.
        shape = tf.shape(x)
        rows, cols = shape[-2], shape[-1]
        i, j = tf.meshgrid(tf.range(rows), tf.range(cols), indexing="ij")
        mask = i <= j - k
        return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x))

    return tf.cond(
        k <= 0, lambda: tf.linalg.band_part(x, -k, -1), _positive_k_branch
    )
|
| 2296 |
+
|
| 2297 |
+
|
| 2298 |
+
def trunc(x):
|
| 2299 |
+
x = convert_to_tensor(x)
|
| 2300 |
+
dtype = standardize_dtype(x.dtype)
|
| 2301 |
+
if dtype == "bool" or "int" in dtype:
|
| 2302 |
+
return x
|
| 2303 |
+
return tf.where(x < 0, tf.math.ceil(x), tf.math.floor(x))
|
| 2304 |
+
|
| 2305 |
+
|
| 2306 |
+
def vdot(x1, x2):
|
| 2307 |
+
x1 = convert_to_tensor(x1)
|
| 2308 |
+
x2 = convert_to_tensor(x2)
|
| 2309 |
+
result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
|
| 2310 |
+
compute_dtype = dtypes.result_type(result_dtype, float)
|
| 2311 |
+
x1 = tf.cast(x1, compute_dtype)
|
| 2312 |
+
x2 = tf.cast(x2, compute_dtype)
|
| 2313 |
+
x1 = tf.reshape(x1, [-1])
|
| 2314 |
+
x2 = tf.reshape(x2, [-1])
|
| 2315 |
+
return tf.cast(dot(x1, x2), result_dtype)
|
| 2316 |
+
|
| 2317 |
+
|
| 2318 |
+
def inner(x1, x2):
    """Inner product over the last axes of `x1` and `x2`.

    Scalar operands (rank 0) fall back to plain multiplication, matching
    NumPy's `inner` semantics.
    """
    a = convert_to_tensor(x1)
    b = convert_to_tensor(x2)
    result_dtype = dtypes.result_type(a.dtype, b.dtype)
    compute_dtype = dtypes.result_type(result_dtype, float)
    a = tf.cast(a, compute_dtype)
    b = tf.cast(b, compute_dtype)
    # Rank is only known dynamically here, so branch via tf.cond.
    either_scalar = tf.math.logical_or(
        tf.math.equal(tf.rank(a), 0),
        tf.math.equal(tf.rank(b), 0),
    )
    out = tf.cond(
        either_scalar,
        lambda: a * b,
        lambda: tf.tensordot(a, b, axes=[[-1], [-1]]),
    )
    return tf.cast(out, result_dtype)
|
| 2334 |
+
|
| 2335 |
+
|
| 2336 |
+
def vstack(xs):
    """Stack the tensors in `xs` along axis 0, promoting to a common dtype."""
    seen_dtypes = {getattr(x, "dtype", type(x)) for x in xs}
    if len(seen_dtypes) > 1:
        # Mixed inputs: promote everything before concatenating.
        common = dtypes.result_type(*seen_dtypes)
        xs = tree.map_structure(lambda t: convert_to_tensor(t, common), xs)
    return tf.concat(xs, axis=0)
|
| 2342 |
+
|
| 2343 |
+
|
| 2344 |
+
def _vmap_fn(fn, in_axes=0):
    """Wrap `fn` to map over axis 0 using `tf.vectorized_map`.

    Only `in_axes=0` is supported on this backend; anything else raises.
    """
    if in_axes != 0:
        raise ValueError(
            "Not supported with `vectorize()` with the TensorFlow backend."
        )

    @functools.wraps(fn)
    def wrapped(x):
        return tf.vectorized_map(fn, x)

    return wrapped
|
| 2355 |
+
|
| 2356 |
+
|
| 2357 |
+
def vectorize(pyfunc, *, excluded=None, signature=None):
    """Vectorize `pyfunc` via the shared `vectorize_impl` helper,
    using this backend's `_vmap_fn` as the mapping primitive."""
    return vectorize_impl(
        pyfunc, _vmap_fn, excluded=excluded, signature=signature
    )
|
| 2361 |
+
|
| 2362 |
+
|
| 2363 |
+
def where(condition, x1, x2):
    """Select elements from `x1`/`x2` based on `condition`.

    When both `x1` and `x2` are None, behaves like `nonzero(condition)`
    (NumPy's single-argument `where`). Supplying only one of them is an
    error.
    """
    condition = tf.cast(condition, "bool")
    if x1 is None and x2 is None:
        return nonzero(condition)
    if x1 is None or x2 is None:
        raise ValueError(
            "`x1` and `x2` either both should be `None`"
            " or both should have non-None value."
        )
    # Leave python scalars as-is so dtype resolution treats them weakly.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.where(
        condition,
        convert_to_tensor(x1, dtype),
        convert_to_tensor(x2, dtype),
    )
|
| 2383 |
+
|
| 2384 |
+
|
| 2385 |
+
@sparse.elementwise_division
def divide(x1, x2):
    """Elementwise true division with NumPy-style dtype promotion.

    Promoting with `float` guarantees integer inputs produce a float
    result, as in `np.true_divide`.
    """
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    out_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
        float,
    )
    return tf.divide(
        convert_to_tensor(x1, out_dtype), convert_to_tensor(x2, out_dtype)
    )
|
| 2399 |
+
|
| 2400 |
+
|
| 2401 |
+
def divide_no_nan(x1, x2):
    """Elementwise division that returns 0 where the divisor is 0.

    Uses the same float-promoting dtype resolution as `divide`.
    """
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    out_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
        float,
    )
    return tf.math.divide_no_nan(
        convert_to_tensor(x1, out_dtype), convert_to_tensor(x2, out_dtype)
    )
|
| 2414 |
+
|
| 2415 |
+
|
| 2416 |
+
def true_divide(x1, x2):
    """Alias of `divide` — NumPy's `true_divide` is ordinary division."""
    return divide(x1, x2)
|
| 2418 |
+
|
| 2419 |
+
|
| 2420 |
+
def power(x1, x2):
    """Elementwise exponentiation with dtype promotion.

    Unsigned integer dtypes are computed in int32 and cast back, since
    `tf.pow` has no uint kernels.
    """
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    # TODO: tf.pow doesn't support uint* types
    if "uint" in dtype:
        base = convert_to_tensor(x1, "int32")
        exp = convert_to_tensor(x2, "int32")
        return tf.cast(tf.pow(base, exp), dtype)
    return tf.pow(convert_to_tensor(x1, dtype), convert_to_tensor(x2, dtype))
|
| 2437 |
+
|
| 2438 |
+
|
| 2439 |
+
@sparse.elementwise_unary
def negative(x):
    """Elementwise numerical negation of `x`."""
    return tf.negative(x)
|
| 2442 |
+
|
| 2443 |
+
|
| 2444 |
+
@sparse.elementwise_unary
def square(x):
    """Elementwise square of `x`; booleans are widened to int32 first."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "bool":
        # tf.square has no bool kernel; int32 matches NumPy's promotion.
        x = tf.cast(x, "int32")
    return tf.square(x)
|
| 2450 |
+
|
| 2451 |
+
|
| 2452 |
+
@sparse.elementwise_unary
def sqrt(x):
    """Elementwise square root, always computed in a float dtype.

    int64 goes to the configured floatx rather than float64, matching
    the backend's promotion conventions.
    """
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        target = config.floatx()
    else:
        target = dtypes.result_type(x.dtype, float)
    return tf.math.sqrt(tf.cast(x, target))
|
| 2462 |
+
|
| 2463 |
+
|
| 2464 |
+
def squeeze(x, axis=None):
    """Remove size-1 dimensions from `x`.

    Raises ValueError if an explicitly requested axis is not of size 1.
    SparseTensors are rebuilt manually because `tf.squeeze` has no
    sparse kernel.
    """
    x = convert_to_tensor(x)
    axis = to_tuple_or_list(axis)
    static_shape = x.shape.as_list()
    if axis is not None:
        for a in axis:
            if static_shape[a] != 1:
                raise ValueError(
                    f"Cannot squeeze axis={a}, because the "
                    "dimension is not 1."
                )
        axis = sorted(canonicalize_axis(a, len(static_shape)) for a in axis)
    if isinstance(x, tf.SparseTensor):
        dynamic_shape = tf.shape(x)
        kept_dims = []
        kept_axes = []
        for i, dim in enumerate(static_shape):
            # Drop this dim if it was requested, or (with axis=None) if
            # its static size is 1.
            drop = dim == 1 if axis is None else i in axis
            if not drop:
                kept_dims.append(dim if dim is not None else dynamic_shape[i])
                kept_axes.append(i)
        new_indices = tf.gather(x.indices, kept_axes, axis=1)
        return tf.SparseTensor(new_indices, x.values, tuple(kept_dims))
    return tf.squeeze(x, axis=axis)
|
| 2487 |
+
|
| 2488 |
+
|
| 2489 |
+
def transpose(x, axes=None):
    """Permute the dimensions of `x` according to `axes`.

    Sparse inputs go through `tf.sparse.transpose`, which loses static
    shape information, so it is restored explicitly afterwards.
    """
    if isinstance(x, tf.SparseTensor):
        from keras.src.ops.operation_utils import compute_transpose_output_shape

        result = tf.sparse.transpose(x, perm=axes)
        result.set_shape(compute_transpose_output_shape(x.shape, axes))
        return result
    return tf.transpose(x, perm=axes)
|
| 2497 |
+
|
| 2498 |
+
|
| 2499 |
+
def var(x, axis=None, keepdims=False):
    """Variance of `x` along `axis`.

    Accumulation happens in at least float32 to limit precision loss for
    low-precision inputs; the result is cast to the float-promoted dtype.
    """
    x = convert_to_tensor(x)
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    result_dtype = dtypes.result_type(x.dtype, float)
    variance = tf.math.reduce_variance(
        tf.cast(x, compute_dtype), axis=axis, keepdims=keepdims
    )
    return tf.cast(variance, result_dtype)
|
| 2508 |
+
|
| 2509 |
+
|
| 2510 |
+
def sum(x, axis=None, keepdims=False):
    """Sum of `x` along `axis`, widening narrow integer dtypes.

    Small int dtypes are accumulated in 32 bits (follow jax's rule) to
    avoid overflow. Sparse inputs stay sparse.
    """
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    _widen = {
        "bool": "int32",
        "int8": "int32",
        "int16": "int32",
        "uint8": "uint32",
        "uint16": "uint32",
    }
    x = cast(x, _widen.get(dtype, dtype))
    if isinstance(x, tf.SparseTensor):
        return tf.sparse.reduce_sum(
            x, axis=axis, keepdims=keepdims, output_is_sparse=True
        )
    return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
|
| 2524 |
+
|
| 2525 |
+
|
| 2526 |
+
def eye(N, M=None, k=0, dtype=None):
    """N x M matrix with ones on diagonal `k` and zeros elsewhere."""
    dtype = dtype or config.floatx()
    M = N if M is None else M
    if isinstance(k, int) and k == 0:
        # Main diagonal: tf.eye handles this directly.
        return tf.eye(N, M, dtype=dtype)
    # Create a smaller square eye and pad appropriately.
    diag_len = tf.minimum(M - k, N + k)
    return tf.pad(
        tf.eye(diag_len, dtype=dtype),
        paddings=(
            (tf.maximum(-k, 0), tf.maximum(N - M + k, 0)),
            (tf.maximum(k, 0), tf.maximum(M - N - k, 0)),
        ),
    )
|
| 2539 |
+
|
| 2540 |
+
|
| 2541 |
+
def floor_divide(x1, x2):
    """Elementwise floor division with NumPy-style dtype promotion."""
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.math.floordiv(
        convert_to_tensor(x1, dtype), convert_to_tensor(x2, dtype)
    )
|
| 2553 |
+
|
| 2554 |
+
|
| 2555 |
+
def logical_xor(x1, x2):
    """Elementwise logical XOR after casting both inputs to bool."""
    return tf.math.logical_xor(tf.cast(x1, "bool"), tf.cast(x2, "bool"))
|
| 2559 |
+
|
| 2560 |
+
|
| 2561 |
+
def correlate(x1, x2, mode="valid"):
    """1-D cross-correlation of `x1` and `x2`, via `tf.nn.conv1d`.

    `mode` is "valid", "same", or "full", mirroring `np.correlate`.
    Inputs are promoted to a float dtype because conv1d has no integer
    kernels; int64 promotes to float64, everything else not already in
    {bfloat16, float16, float64} goes to float32.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)

    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    if dtype == tf.int64:
        dtype = tf.float64
    elif dtype not in [tf.bfloat16, tf.float16, tf.float64]:
        dtype = tf.float32

    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)

    # Static 1-D lengths; both inputs are treated as vectors.
    x1_len, x2_len = int(x1.shape[0]), int(x2.shape[0])

    if mode == "full":
        # Zero-pad both operands out to the full output length, then run
        # a SAME-padded convolution over the padded signals.
        full_len = x1_len + x2_len - 1
        x1_pad = (full_len - x1_len) / 2
        x2_pad = (full_len - x2_len) / 2
        x1 = tf.pad(
            x1, paddings=[[tf.math.floor(x1_pad), tf.math.ceil(x1_pad)]]
        )
        x2 = tf.pad(
            x2, paddings=[[tf.math.floor(x2_pad), tf.math.ceil(x2_pad)]]
        )
        # conv1d wants (batch, width, channels) x (width, in, out).
        x1 = tf.reshape(x1, (1, full_len, 1))
        x2 = tf.reshape(x2, (full_len, 1, 1))
        return tf.squeeze(tf.nn.conv1d(x1, x2, stride=1, padding="SAME"))

    x1 = tf.reshape(x1, (1, x1_len, 1))
    x2 = tf.reshape(x2, (x2_len, 1, 1))
    return tf.squeeze(tf.nn.conv1d(x1, x2, stride=1, padding=mode.upper()))
|
| 2601 |
+
|
| 2602 |
+
|
| 2603 |
+
def select(condlist, choicelist, default=0):
    """Pick elements per the first matching condition (np.select)."""
    return tf.experimental.numpy.select(condlist, choicelist, default=default)
|
| 2605 |
+
|
| 2606 |
+
|
| 2607 |
+
def slogdet(x):
    """Sign and log-absolute-determinant of `x` as a (sign, logabsdet) tuple."""
    return tuple(tf.linalg.slogdet(convert_to_tensor(x)))
|
| 2610 |
+
|
| 2611 |
+
|
| 2612 |
+
def argpartition(x, kth, axis=-1):
    """Indices that partition `x` around its `kth` smallest element.

    Works along `axis` by swapping it to the last position, selecting
    the kth+1 smallest entries via top-k of the negation, then picking
    the remaining indices from a masked copy.
    """
    x = convert_to_tensor(x, tf.int32)

    x = swapaxes(x, axis, -1)
    # Indices of the kth+1 smallest values along the last axis.
    bottom_ind = tf.math.top_k(-x, kth + 1).indices

    n = tf.shape(x)[-1]
    # Mark each selected "bottom" position with a one-hot sum.
    mask = tf.reduce_sum(tf.one_hot(bottom_ind, n, dtype=tf.int32), axis=0)

    indices = tf.where(mask)
    updates = tf.squeeze(tf.zeros(tf.shape(indices)[0], dtype=tf.int32))
    # Zero out the chosen entries so the next top-k yields the rest.
    final_mask = tf.tensor_scatter_nd_update(x, indices, updates)

    top_ind = tf.math.top_k(final_mask, tf.shape(x)[-1] - kth - 1).indices

    out = tf.concat([bottom_ind, top_ind], axis=x.ndim - 1)
    return swapaxes(out, -1, axis)
|
| 2631 |
+
|
| 2632 |
+
|
| 2633 |
+
def histogram(x, bins, range):
    """Computes a histogram of the data tensor `x`.

    Note: the `tf.histogram_fixed_width()` and
    `tf.histogram_fixed_width_bins()` functions
    yield slight numerical differences for some edge cases.
    """
    x = tf.convert_to_tensor(x, dtype=x.dtype)

    # Handle the range argument
    if range is None:
        min_val = tf.reduce_min(x)
        max_val = tf.reduce_max(x)
    else:
        min_val, max_val = range

    # Drop samples outside [min_val, max_val] before bucketing.
    x = tf.boolean_mask(x, (x >= min_val) & (x <= max_val))
    bin_edges = tf.linspace(min_val, max_val, bins + 1)
    # Bucketize needs python-number boundaries, so the edges are
    # materialized eagerly here — NOTE(review): this implies eager
    # execution; `.numpy()` would fail inside a traced tf.function.
    bin_edges_list = bin_edges.numpy().tolist()
    bin_indices = tf.raw_ops.Bucketize(input=x, boundaries=bin_edges_list[1:-1])

    bin_counts = tf.math.bincount(
        bin_indices, minlength=bins, maxlength=bins, dtype=x.dtype
    )
    return bin_counts, bin_edges
|