diff --git a/.gitattributes b/.gitattributes
index 6033910f7996fb60d5da2cedaf4aad1d905a03e0..e130665bb8123a223b95dc9571dcda8cda9324d0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -756,3 +756,11 @@ deepseekvl2/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.s
 deepseekvl2/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 evalkit_tf440/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
 infer_4_33_0/lib/python3.10/site-packages/h5py/defs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libtinfow.so.6.4 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libstdc++.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libtinfo.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_tf440/lib/libbz2.so.1.0.8 b/evalkit_tf440/lib/libbz2.so.1.0.8
new file mode 100644
index 0000000000000000000000000000000000000000..7e057fb75b1b533e33984742a4a02254948e177f
--- /dev/null
+++ b/evalkit_tf440/lib/libbz2.so.1.0.8
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301
+size 229016
diff --git a/evalkit_tf440/lib/libncurses++.a b/evalkit_tf440/lib/libncurses++.a
new file mode 100644
index 0000000000000000000000000000000000000000..592b1b981d3fb155dffb6c4dcc9335849efc088c
--- /dev/null
+++ b/evalkit_tf440/lib/libncurses++.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93b48c40f5d7b07e1a8c4bd9419df55c28e250cca1166be4aafd2fc7caf18823
+size 187604
diff --git a/evalkit_tf440/lib/libsqlite3.so.0 b/evalkit_tf440/lib/libsqlite3.so.0
new file mode 100644
index 0000000000000000000000000000000000000000..531fb86e0309a27d33fb4bc03e4442023e5cd590
--- /dev/null
+++ b/evalkit_tf440/lib/libsqlite3.so.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
+size 1543808
diff --git a/evalkit_tf440/lib/libsqlite3.so.0.8.6 b/evalkit_tf440/lib/libsqlite3.so.0.8.6
new file mode 100644
index 0000000000000000000000000000000000000000..531fb86e0309a27d33fb4bc03e4442023e5cd590
--- /dev/null
+++ b/evalkit_tf440/lib/libsqlite3.so.0.8.6
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
+size 1543808
diff --git a/evalkit_tf440/lib/libstdc++.so b/evalkit_tf440/lib/libstdc++.so
new file mode 100644
index 0000000000000000000000000000000000000000..577831ce0f1dc31bc63f9d1060ba2c774fd05303
--- /dev/null
+++ b/evalkit_tf440/lib/libstdc++.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
+size 17981480
diff --git a/evalkit_tf440/lib/libstdc++.so.6.0.29 b/evalkit_tf440/lib/libstdc++.so.6.0.29
new file mode 100644
index 0000000000000000000000000000000000000000..577831ce0f1dc31bc63f9d1060ba2c774fd05303
--- /dev/null
+++ b/evalkit_tf440/lib/libstdc++.so.6.0.29
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
+size 17981480
diff --git a/evalkit_tf440/lib/libtinfo.so b/evalkit_tf440/lib/libtinfo.so
new file mode 100644
index 0000000000000000000000000000000000000000..81c464858755f2d6bf6f7ccc2d3905b601df6882
--- /dev/null
+++ b/evalkit_tf440/lib/libtinfo.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2024856ea882d3151c1de53fcc7c66af037565fb8b387e4db35fb80b61ca49b4
+size 287080
diff --git a/evalkit_tf440/lib/libtinfow.so.6.4 b/evalkit_tf440/lib/libtinfow.so.6.4
new file mode 100644
index 0000000000000000000000000000000000000000..81c464858755f2d6bf6f7ccc2d3905b601df6882
--- /dev/null
+++ b/evalkit_tf440/lib/libtinfow.so.6.4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2024856ea882d3151c1de53fcc7c66af037565fb8b387e4db35fb80b61ca49b4
+size 287080
diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/client_feature_flags.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/client_feature_flags.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b289f010f8338dca5b4a56c0cd5a557299ef36b
--- /dev/null
+++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/client_feature_flags.py
@@ -0,0 +1,113 @@
+# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Middleware for injecting client-side feature flags into the Context."""
+
+import json
+import urllib.parse
+
+from tensorboard import context
+from tensorboard import errors
+
+
+class ClientFeatureFlagsMiddleware:
+    """Middleware for injecting client-side feature flags into the Context.
+
+    The client webapp is expected to include a json-serialized version of its
+    FeatureFlags in the `X-TensorBoard-Feature-Flags` header or the
+    `tensorBoardFeatureFlags` query parameter. This middleware extracts the
+    header or query parameter value and converts it into the client_feature_flags
+    property for the DataProvider's Context object, where client_feature_flags
+    is a Dict of string keys and arbitrary value types.
+
+    In the event that both the header and query parameter are specified, the
+    values from the header will take precedence.
+    """
+
+    def __init__(self, application):
+        """Initializes this middleware.
+
+        Args:
+          application: The WSGI application to wrap (see PEP 3333).
+ """ + self._application = application + + def __call__(self, environ, start_response): + header_feature_flags = self._parse_potential_header_param_flags( + environ.get("HTTP_X_TENSORBOARD_FEATURE_FLAGS") + ) + query_string_feature_flags = self._parse_potential_query_param_flags( + environ.get("QUERY_STRING") + ) + + if not header_feature_flags and not query_string_feature_flags: + return self._application(environ, start_response) + + # header flags take precedence + for flag, value in header_feature_flags.items(): + query_string_feature_flags[flag] = value + + ctx = context.from_environ(environ).replace( + client_feature_flags=query_string_feature_flags + ) + context.set_in_environ(environ, ctx) + + return self._application(environ, start_response) + + def _parse_potential_header_param_flags(self, header_string): + if not header_string: + return {} + + try: + header_feature_flags = json.loads(header_string) + except json.JSONDecodeError: + raise errors.InvalidArgumentError( + "X-TensorBoard-Feature-Flags cannot be JSON decoded." + ) + + if not isinstance(header_feature_flags, dict): + raise errors.InvalidArgumentError( + "X-TensorBoard-Feature-Flags cannot be decoded to a dict." + ) + + return header_feature_flags + + def _parse_potential_query_param_flags(self, query_string): + if not query_string: + return {} + + try: + query_string_json = urllib.parse.parse_qs(query_string) + except ValueError: + return {} + + # parse_qs returns the dictionary values as lists for each name. + potential_feature_flags = query_string_json.get( + "tensorBoardFeatureFlags", [] + ) + if not potential_feature_flags: + return {} + try: + client_feature_flags = json.loads(potential_feature_flags[0]) + except json.JSONDecodeError: + raise errors.InvalidArgumentError( + "tensorBoardFeatureFlags cannot be JSON decoded." + ) + + if not isinstance(client_feature_flags, dict): + raise errors.InvalidArgumentError( + "tensorBoardFeatureFlags cannot be decoded to a dict." + ) + + return client_feature_flags diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/empty_path_redirect.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/empty_path_redirect.py new file mode 100644 index 0000000000000000000000000000000000000000..63db73fa165d11e63fa75f85f5c5596e9eb12f9f --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/empty_path_redirect.py @@ -0,0 +1,46 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Redirect from an empty path to the virtual application root. + +Sometimes, middleware transformations will make the path empty: for +example, navigating to "/foo" (no trailing slash) when the path prefix +is exactly "/foo". In such cases, relative links on the frontend would +break. Instead of handling this special case in each relevant +middleware, we install a top-level redirect handler from "" to "/". 
+ +This middleware respects `SCRIPT_NAME` as described by the WSGI spec. If +`SCRIPT_NAME` is set to "/foo", then an empty `PATH_INFO` corresponds to +the actual path "/foo", and so will be redirected to "/foo/". +""" + + +class EmptyPathRedirectMiddleware: + """WSGI middleware to redirect from "" to "/".""" + + def __init__(self, application): + """Initializes this middleware. + + Args: + application: The WSGI application to wrap (see PEP 3333). + """ + self._application = application + + def __call__(self, environ, start_response): + path = environ.get("PATH_INFO", "") + if path: + return self._application(environ, start_response) + location = environ.get("SCRIPT_NAME", "") + "/" + start_response("301 Moved Permanently", [("Location", location)]) + return [] diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/data_ingester.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/data_ingester.py new file mode 100644 index 0000000000000000000000000000000000000000..52ec9e3275ff062b9dae60775ff74361f94a0751 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/data_ingester.py @@ -0,0 +1,277 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides data ingestion logic backed by local event processing.""" + +import os +import re +import threading +import time + + +from tensorboard.backend.event_processing import data_provider +from tensorboard.backend.event_processing import plugin_event_multiplexer +from tensorboard.backend.event_processing import tag_types +from tensorboard.compat import tf +from tensorboard.data import ingester +from tensorboard.plugins.audio import metadata as audio_metadata +from tensorboard.plugins.histogram import metadata as histogram_metadata +from tensorboard.plugins.image import metadata as image_metadata +from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata +from tensorboard.plugins.scalar import metadata as scalar_metadata +from tensorboard.util import tb_logging + + +DEFAULT_SIZE_GUIDANCE = { + tag_types.TENSORS: 10, +} + +# TODO(@wchargin): Replace with something that works for third-party plugins. +DEFAULT_TENSOR_SIZE_GUIDANCE = { + scalar_metadata.PLUGIN_NAME: 1000, + image_metadata.PLUGIN_NAME: 10, + audio_metadata.PLUGIN_NAME: 10, + histogram_metadata.PLUGIN_NAME: 500, + pr_curve_metadata.PLUGIN_NAME: 100, +} + +logger = tb_logging.get_logger() + + +class LocalDataIngester(ingester.DataIngester): + """Data ingestion implementation to use when running locally.""" + + def __init__(self, flags): + """Initializes a `LocalDataIngester` from `flags`. + + Args: + flags: An argparse.Namespace containing TensorBoard CLI flags. + + Returns: + The new `LocalDataIngester`. 
+ """ + tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE) + tensor_size_guidance.update(flags.samples_per_plugin) + self._multiplexer = plugin_event_multiplexer.EventMultiplexer( + size_guidance=DEFAULT_SIZE_GUIDANCE, + tensor_size_guidance=tensor_size_guidance, + purge_orphaned_data=flags.purge_orphaned_data, + max_reload_threads=flags.max_reload_threads, + event_file_active_filter=_get_event_file_active_filter(flags), + detect_file_replacement=flags.detect_file_replacement, + ) + self._data_provider = data_provider.MultiplexerDataProvider( + self._multiplexer, flags.logdir or flags.logdir_spec + ) + self._reload_interval = flags.reload_interval + self._reload_task = flags.reload_task + if flags.logdir: + self._path_to_run = {os.path.expanduser(flags.logdir): None} + else: + self._path_to_run = _parse_event_files_spec(flags.logdir_spec) + + # Conditionally import tensorflow_io. + if getattr(tf, "__version__", "stub") != "stub": + _check_filesystem_support(self._path_to_run.keys()) + + @property + def data_provider(self): + return self._data_provider + + @property + def deprecated_multiplexer(self): + return self._multiplexer + + def start(self): + """Starts ingesting data based on the ingester flag configuration.""" + + def _reload(): + while True: + start = time.time() + logger.info("TensorBoard reload process beginning") + for path, name in self._path_to_run.items(): + self._multiplexer.AddRunsFromDirectory(path, name) + logger.info( + "TensorBoard reload process: Reload the whole Multiplexer" + ) + self._multiplexer.Reload() + duration = time.time() - start + logger.info( + "TensorBoard done reloading. Load took %0.3f secs", duration + ) + if self._reload_interval == 0: + # Only load the multiplexer once. Do not continuously reload. + break + time.sleep(self._reload_interval) + + if self._reload_task == "process": + logger.info("Launching reload in a child process") + import multiprocessing + + process = multiprocessing.Process(target=_reload, name="Reloader") + # Best-effort cleanup; on exit, the main TB parent process will attempt to + # kill all its daemonic children. + process.daemon = True + process.start() + elif self._reload_task in ("thread", "auto"): + logger.info("Launching reload in a daemon thread") + thread = threading.Thread(target=_reload, name="Reloader") + # Make this a daemon thread, which won't block TB from exiting. + thread.daemon = True + thread.start() + elif self._reload_task == "blocking": + if self._reload_interval != 0: + raise ValueError( + "blocking reload only allowed with load_interval=0" + ) + _reload() + else: + raise ValueError("unrecognized reload_task: %s" % self._reload_task) + + +def _get_event_file_active_filter(flags): + """Returns a predicate for whether an event file load timestamp is active. + + Returns: + A predicate function accepting a single UNIX timestamp float argument, or + None if multi-file loading is not enabled. + """ + if not flags.reload_multifile: + return None + inactive_secs = flags.reload_multifile_inactive_secs + if inactive_secs == 0: + return None + if inactive_secs < 0: + return lambda timestamp: True + return lambda timestamp: timestamp + inactive_secs >= time.time() + + +def _parse_event_files_spec(logdir_spec): + """Parses `logdir_spec` into a map from paths to run group names. + + The `--logdir_spec` flag format is a comma-separated list of path + specifications. A path spec looks like 'group_name:/path/to/directory' or + '/path/to/directory'; in the latter case, the group is unnamed. 
Group names + cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec + with no name and path '/foo:bar/baz'. + + Globs are not supported. + + Args: + logdir: A comma-separated list of run specifications. + Returns: + A dict mapping directory paths to names like {'/path/to/directory': 'name'}. + Groups without an explicit name are named after their path. If logdir is + None, returns an empty dict, which is helpful for testing things that don't + require any valid runs. + """ + files = {} + if logdir_spec is None: + return files + # Make sure keeping consistent with ParseURI in core/lib/io/path.cc + uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]*://.*") + for specification in logdir_spec.split(","): + # Check if the spec contains group. A spec start with xyz:// is regarded as + # URI path spec instead of group spec. If the spec looks like /foo:bar/baz, + # then we assume it's a path with a colon. If the spec looks like + # [a-zA-z]:\foo then we assume its a Windows path and not a single letter + # group + if ( + uri_pattern.match(specification) is None + and ":" in specification + and specification[0] != "/" + and not os.path.splitdrive(specification)[0] + ): + # We split at most once so run_name:/path:with/a/colon will work. + run_name, _, path = specification.partition(":") + else: + run_name = None + path = specification + if uri_pattern.match(path) is None: + path = os.path.realpath(os.path.expanduser(path)) + files[path] = run_name + return files + + +def _get_filesystem_scheme(path): + """Extracts filesystem scheme from a given path. + + The filesystem scheme is usually separated by `://` from the local filesystem + path if given. For example, the scheme of `file://tmp/tf` is `file`. + + Args: + path: A strings representing an input log directory. + Returns: + Filesystem scheme, None if the path doesn't contain one. + """ + if "://" not in path: + return None + return path.split("://")[0] + + +def _check_filesystem_support(paths): + """Examines the list of filesystems user requested. + + If TF I/O schemes are requested, try to import tensorflow_io module. + + Args: + paths: A list of strings representing input log directories. + """ + get_registered_schemes = getattr( + tf.io.gfile, "get_registered_schemes", None + ) + registered_schemes = ( + None if get_registered_schemes is None else get_registered_schemes() + ) + + # Only need to check one path for each scheme. + scheme_to_path = {_get_filesystem_scheme(path): path for path in paths} + missing_scheme = None + for scheme, path in scheme_to_path.items(): + if scheme is None: + continue + # Use `tf.io.gfile.exists.get_registered_schemes` if possible. + if registered_schemes is not None: + if scheme not in registered_schemes: + missing_scheme = scheme + break + else: + # Fall back to `tf.io.gfile.exists`. + try: + tf.io.gfile.exists(path) + except tf.errors.UnimplementedError: + missing_scheme = scheme + break + except tf.errors.OpError: + # Swallow other errors; we aren't concerned about them at this point. + pass + + if missing_scheme: + try: + import tensorflow_io # noqa: F401 + except ImportError as e: + supported_schemes_msg = ( + " (supported schemes: {})".format(registered_schemes) + if registered_schemes + else "" + ) + raise tf.errors.UnimplementedError( + None, + None, + ( + "Error: Unsupported filename scheme '{}'{}. For additional" + + " filesystem support, consider installing TensorFlow I/O" + + " (https://www.tensorflow.org/io) via `pip install tensorflow-io`." 
+ ).format(missing_scheme, supported_schemes_msg), + ) from e diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_accumulator.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_accumulator.py new file mode 100644 index 0000000000000000000000000000000000000000..86369be1f8a7dadb4071f45d9f1a1ebb12168503 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_accumulator.py @@ -0,0 +1,951 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Takes a generator of values, and accumulates them for a frontend.""" + +import collections +import dataclasses +import threading + +from typing import Optional, Sequence, Tuple + +from tensorboard.backend.event_processing import directory_watcher +from tensorboard.backend.event_processing import event_file_loader +from tensorboard.backend.event_processing import event_util +from tensorboard.backend.event_processing import io_wrapper +from tensorboard.backend.event_processing import plugin_asset_util +from tensorboard.backend.event_processing import reservoir +from tensorboard.backend.event_processing import tag_types +from tensorboard.compat.proto import config_pb2 +from tensorboard.compat.proto import event_pb2 +from tensorboard.compat.proto import graph_pb2 +from tensorboard.compat.proto import meta_graph_pb2 +from tensorboard.compat.proto import tensor_pb2 +from tensorboard.plugins.distribution import compressor +from tensorboard.util import tb_logging + + +logger = tb_logging.get_logger() + + +@dataclasses.dataclass(frozen=True) +class ScalarEvent: + """Contains information of a scalar event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + value: A float or int value of the scalar. + """ + + wall_time: float + step: int + value: float + + +@dataclasses.dataclass(frozen=True) +class CompressedHistogramEvent: + """Contains information of a compressed histogram event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + compressed_histogram_values: A sequence of tuples of basis points and + associated values in a compressed histogram. + """ + + wall_time: float + step: int + compressed_histogram_values: Sequence[Tuple[float, float]] + + +@dataclasses.dataclass(frozen=True) +class HistogramValue: + """Holds the information of the histogram values. + + Attributes: + min: A float or int min value. + max: A float or int max value. + num: Total number of values. + sum: Sum of all values. + sum_squares: Sum of squares for all values. + bucket_limit: Upper values per bucket. + bucket: Numbers of values per bucket. 
+ """ + + min: float + max: float + num: int + sum: float + sum_squares: float + bucket_limit: Sequence[float] + bucket: Sequence[int] + + +@dataclasses.dataclass(frozen=True) +class HistogramEvent: + """Contains information of a histogram event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + histogram_value: Information of the histogram values. + """ + + wall_time: float + step: int + histogram_value: HistogramValue + + +@dataclasses.dataclass(frozen=True) +class ImageEvent: + """Contains information of an image event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + encoded_image_string: Image content encoded in bytes. + width: Width of the image. + height: Height of the image. + """ + + wall_time: float + step: int + encoded_image_string: bytes + width: int + height: int + + +@dataclasses.dataclass(frozen=True) +class AudioEvent: + """Contains information of an audio event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + encoded_audio_string: Audio content encoded in bytes. + content_type: A string describes the type of the audio content. + sample_rate: Sample rate of the audio in Hz. Must be positive. + length_frames: Length of the audio in frames (samples per channel). + """ + + wall_time: float + step: int + encoded_audio_string: bytes + content_type: str + sample_rate: float + length_frames: int + + +@dataclasses.dataclass(frozen=True) +class TensorEvent: + """A tensor event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + tensor_proto: A `TensorProto`. + """ + + wall_time: float + step: int + tensor_proto: tensor_pb2.TensorProto + + +## Different types of summary events handled by the event_accumulator +SUMMARY_TYPES = { + "simple_value": "_ProcessScalar", + "histo": "_ProcessHistogram", + "image": "_ProcessImage", + "audio": "_ProcessAudio", + "tensor": "_ProcessTensor", +} + +# Legacy aliases +COMPRESSED_HISTOGRAMS = tag_types.COMPRESSED_HISTOGRAMS +HISTOGRAMS = tag_types.HISTOGRAMS +IMAGES = tag_types.IMAGES +AUDIO = tag_types.AUDIO +SCALARS = tag_types.SCALARS +TENSORS = tag_types.TENSORS +GRAPH = tag_types.GRAPH +META_GRAPH = tag_types.META_GRAPH +RUN_METADATA = tag_types.RUN_METADATA + +## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf) +## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev, +## and then the long tail. +NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000) + +DEFAULT_SIZE_GUIDANCE = { + COMPRESSED_HISTOGRAMS: 500, + IMAGES: 4, + AUDIO: 4, + SCALARS: 10000, + HISTOGRAMS: 1, + TENSORS: 10, +} + +STORE_EVERYTHING_SIZE_GUIDANCE = { + COMPRESSED_HISTOGRAMS: 0, + IMAGES: 0, + AUDIO: 0, + SCALARS: 0, + HISTOGRAMS: 0, + TENSORS: 0, +} + + +class EventAccumulator: + """An `EventAccumulator` takes an event generator, and accumulates the + values. + + The `EventAccumulator` is intended to provide a convenient Python interface + for loading Event data written during a TensorFlow run. TensorFlow writes out + `Event` protobuf objects, which have a timestamp and step number, and often + contain a `Summary`. Summaries can have different kinds of data like an image, + a scalar value, or a histogram. The Summaries also have a tag, which we use to + organize logically related data. The `EventAccumulator` supports retrieving + the `Event` and `Summary` data by its tag. 
+ + Calling `Tags()` gets a map from `tagType` (e.g. `'images'`, + `'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those + data types. Then, various functional endpoints (eg + `Accumulator.Scalars(tag)`) allow for the retrieval of all data + associated with that tag. + + The `Reload()` method synchronously loads all of the data written so far. + + Histograms, audio, and images are very large, so storing all of them is not + recommended. + + Fields: + audios: A reservoir.Reservoir of audio summaries. + compressed_histograms: A reservoir.Reservoir of compressed + histogram summaries. + histograms: A reservoir.Reservoir of histogram summaries. + images: A reservoir.Reservoir of image summaries. + most_recent_step: Step of last Event proto added. This should only + be accessed from the thread that calls Reload. This is -1 if + nothing has been loaded yet. + most_recent_wall_time: Timestamp of last Event proto added. This is + a float containing seconds from the UNIX epoch, or -1 if + nothing has been loaded yet. This should only be accessed from + the thread that calls Reload. + path: A file path to a directory containing tf events files, or a single + tf events file. The accumulator will load events from this path. + scalars: A reservoir.Reservoir of scalar summaries. + tensors: A reservoir.Reservoir of tensor summaries. + + @@Tensors + """ + + def __init__( + self, + path, + size_guidance=None, + compression_bps=NORMAL_HISTOGRAM_BPS, + purge_orphaned_data=True, + ): + """Construct the `EventAccumulator`. + + Args: + path: A file path to a directory containing tf events files, or a single + tf events file. The accumulator will load events from this path. + size_guidance: Information on how much data the EventAccumulator should + store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much + so as to avoid OOMing the client. The size_guidance should be a map + from a `tagType` string to an integer representing the number of + items to keep per tag for items of that `tagType`. If the size is 0, + all events are stored. + compression_bps: Information on how the `EventAccumulator` should compress + histogram data for the `CompressedHistograms` tag (for details see + `ProcessCompressedHistogram`). + purge_orphaned_data: Whether to discard any events that were "orphaned" by + a TensorFlow restart. + """ + size_guidance = size_guidance or DEFAULT_SIZE_GUIDANCE + sizes = {} + for key in DEFAULT_SIZE_GUIDANCE: + if key in size_guidance: + sizes[key] = size_guidance[key] + else: + sizes[key] = DEFAULT_SIZE_GUIDANCE[key] + + self._first_event_timestamp = None + self.scalars = reservoir.Reservoir(size=sizes[SCALARS]) + + self._graph = None + self._graph_from_metagraph = False + self._meta_graph = None + self._tagged_metadata = {} + self.summary_metadata = {} + self.histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS]) + self.compressed_histograms = reservoir.Reservoir( + size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False + ) + self.images = reservoir.Reservoir(size=sizes[IMAGES]) + self.audios = reservoir.Reservoir(size=sizes[AUDIO]) + self.tensors = reservoir.Reservoir(size=sizes[TENSORS]) + + # Keep a mapping from plugin name to a dict mapping from tag to plugin data + # content obtained from the SummaryMetadata (metadata field of Value) for + # that plugin (This is not the entire SummaryMetadata proto - only the + # content for that plugin). 
The SummaryWriter only keeps the content on the + # first event encountered per tag, so we must store that first instance of + # content for each tag. + self._plugin_to_tag_to_content = collections.defaultdict(dict) + + self._generator_mutex = threading.Lock() + self.path = path + self._generator = _GeneratorFromPath(path) + + self._compression_bps = compression_bps + self.purge_orphaned_data = purge_orphaned_data + + self.most_recent_step = -1 + self.most_recent_wall_time = -1 + self.file_version = None + + # Name of the source writer that writes the event. + self._source_writer = None + + # The attributes that get built up by the accumulator + self.accumulated_attrs = ( + "scalars", + "histograms", + "compressed_histograms", + "images", + "audios", + ) + self._tensor_summaries = {} + + def Reload(self): + """Loads all events added since the last call to `Reload`. + + If `Reload` was never called, loads all events in the file. + + Returns: + The `EventAccumulator`. + """ + with self._generator_mutex: + for event in self._generator.Load(): + self._ProcessEvent(event) + return self + + def PluginAssets(self, plugin_name): + """Return a list of all plugin assets for the given plugin. + + Args: + plugin_name: The string name of a plugin to retrieve assets for. + + Returns: + A list of string plugin asset names, or empty list if none are available. + If the plugin was not registered, an empty list is returned. + """ + return plugin_asset_util.ListAssets(self.path, plugin_name) + + def RetrievePluginAsset(self, plugin_name, asset_name): + """Return the contents of a given plugin asset. + + Args: + plugin_name: The string name of a plugin. + asset_name: The string name of an asset. + + Returns: + The string contents of the plugin asset. + + Raises: + KeyError: If the asset is not available. + """ + return plugin_asset_util.RetrieveAsset( + self.path, plugin_name, asset_name + ) + + def FirstEventTimestamp(self): + """Returns the timestamp in seconds of the first event. + + If the first event has been loaded (either by this method or by `Reload`, + this returns immediately. Otherwise, it will load in the first event. Note + that this means that calling `Reload` will cause this to block until + `Reload` has finished. + + Returns: + The timestamp in seconds of the first event that was loaded. + + Raises: + ValueError: If no events have been loaded and there were no events found + on disk. + """ + if self._first_event_timestamp is not None: + return self._first_event_timestamp + with self._generator_mutex: + try: + event = next(self._generator.Load()) + self._ProcessEvent(event) + return self._first_event_timestamp + + except StopIteration: + raise ValueError("No event timestamp could be found") + + def GetSourceWriter(self) -> Optional[str]: + """Returns the name of the event writer.""" + if self._source_writer is not None: + return self._source_writer + with self._generator_mutex: + try: + event = next(self._generator.Load()) + self._ProcessEvent(event) + return self._source_writer + except StopIteration: + logger.info( + "End of file in %s, no source writer was found.", self.path + ) + + def PluginTagToContent(self, plugin_name): + """Returns a dict mapping tags to content specific to that plugin. + + Args: + plugin_name: The name of the plugin for which to fetch plugin-specific + content. + + Raises: + KeyError: if the plugin name is not found. + + Returns: + A dict mapping tag names to bytestrings of plugin-specific content-- by + convention, in the form of binary serialized protos. 
+ """ + if plugin_name not in self._plugin_to_tag_to_content: + raise KeyError("Plugin %r could not be found." % plugin_name) + return self._plugin_to_tag_to_content[plugin_name] + + def SummaryMetadata(self, tag): + """Given a summary tag name, return the associated metadata object. + + Args: + tag: The name of a tag, as a string. + + Raises: + KeyError: If the tag is not found. + + Returns: + A `SummaryMetadata` protobuf. + """ + return self.summary_metadata[tag] + + def _ProcessEvent(self, event): + """Called whenever an event is loaded.""" + if self._first_event_timestamp is None: + self._first_event_timestamp = event.wall_time + + if event.HasField("source_metadata"): + new_source_writer = event_util.GetSourceWriter( + event.source_metadata + ) + if self._source_writer and self._source_writer != new_source_writer: + logger.info( + ( + "Found new source writer for event.proto. " + "Old: {0}, New: {1}" + ).format(self._source_writer, new_source_writer) + ) + self._source_writer = new_source_writer + + if event.HasField("file_version"): + new_file_version = event_util.ParseFileVersion(event.file_version) + if self.file_version and self.file_version != new_file_version: + ## This should not happen. + logger.warning( + ( + "Found new file_version for event.proto. This will " + "affect purging logic for TensorFlow restarts. " + "Old: {0} New: {1}" + ).format(self.file_version, new_file_version) + ) + self.file_version = new_file_version + + self._MaybePurgeOrphanedData(event) + + ## Process the event. + # GraphDef and MetaGraphDef are handled in a special way: + # If no graph_def Event is available, but a meta_graph_def is, and it + # contains a graph_def, then use the meta_graph_def.graph_def as our graph. + # If a graph_def Event is available, always prefer it to the graph_def + # inside the meta_graph_def. + if event.HasField("graph_def"): + if self._graph is not None: + logger.warning( + ( + "Found more than one graph event per run, or there was " + "a metagraph containing a graph_def, as well as one or " + "more graph events. Overwriting the graph with the " + "newest event." + ) + ) + self._graph = event.graph_def + self._graph_from_metagraph = False + elif event.HasField("meta_graph_def"): + if self._meta_graph is not None: + logger.warning( + ( + "Found more than one metagraph event per run. " + "Overwriting the metagraph with the newest event." + ) + ) + self._meta_graph = event.meta_graph_def + if self._graph is None or self._graph_from_metagraph: + # We may have a graph_def in the metagraph. If so, and no + # graph_def is directly available, use this one instead. + meta_graph = meta_graph_pb2.MetaGraphDef() + meta_graph.ParseFromString(self._meta_graph) + if meta_graph.graph_def: + if self._graph is not None: + logger.warning( + ( + "Found multiple metagraphs containing graph_defs," + "but did not find any graph events. Overwriting the " + "graph with the newest metagraph version." + ) + ) + self._graph_from_metagraph = True + self._graph = meta_graph.graph_def.SerializeToString() + elif event.HasField("tagged_run_metadata"): + tag = event.tagged_run_metadata.tag + if tag in self._tagged_metadata: + logger.warning( + 'Found more than one "run metadata" event with tag ' + + tag + + ". Overwriting it with the newest event." + ) + self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata + elif event.HasField("summary"): + for value in event.summary.value: + if value.HasField("metadata"): + tag = value.tag + # We only store the first instance of the metadata. 
This check + # is important: the `FileWriter` does strip metadata from all + # values except the first one per each tag, but a new + # `FileWriter` is created every time a training job stops and + # restarts. Hence, we must also ignore non-initial metadata in + # this logic. + if tag not in self.summary_metadata: + self.summary_metadata[tag] = value.metadata + plugin_data = value.metadata.plugin_data + if plugin_data.plugin_name: + self._plugin_to_tag_to_content[ + plugin_data.plugin_name + ][tag] = plugin_data.content + else: + logger.warning( + ( + "This summary with tag %r is oddly not associated with a " + "plugin." + ), + tag, + ) + + for summary_type, summary_func in SUMMARY_TYPES.items(): + if value.HasField(summary_type): + datum = getattr(value, summary_type) + tag = value.tag + if summary_type == "tensor" and not tag: + # This tensor summary was created using the old method that used + # plugin assets. We must still continue to support it. + tag = value.node_name + getattr(self, summary_func)( + tag, event.wall_time, event.step, datum + ) + + def Tags(self): + """Return all tags found in the value stream. + + Returns: + A `{tagType: ['list', 'of', 'tags']}` dictionary. + """ + return { + IMAGES: self.images.Keys(), + AUDIO: self.audios.Keys(), + HISTOGRAMS: self.histograms.Keys(), + SCALARS: self.scalars.Keys(), + COMPRESSED_HISTOGRAMS: self.compressed_histograms.Keys(), + TENSORS: self.tensors.Keys(), + # Use a heuristic: if the metagraph is available, but + # graph is not, then we assume the metagraph contains the graph. + GRAPH: self._graph is not None, + META_GRAPH: self._meta_graph is not None, + RUN_METADATA: list(self._tagged_metadata.keys()), + } + + def Scalars(self, tag): + """Given a summary tag, return all associated `ScalarEvent`s. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `ScalarEvent`s. + """ + return self.scalars.Items(tag) + + def Graph(self): + """Return the graph definition, if there is one. + + If the graph is stored directly, return that. If no graph is stored + directly but a metagraph is stored containing a graph, return that. + + Raises: + ValueError: If there is no graph for this run. + + Returns: + The `graph_def` proto. + """ + graph = graph_pb2.GraphDef() + if self._graph is not None: + graph.ParseFromString(self._graph) + return graph + raise ValueError("There is no graph in this EventAccumulator") + + def MetaGraph(self): + """Return the metagraph definition, if there is one. + + Raises: + ValueError: If there is no metagraph for this run. + + Returns: + The `meta_graph_def` proto. + """ + if self._meta_graph is None: + raise ValueError("There is no metagraph in this EventAccumulator") + meta_graph = meta_graph_pb2.MetaGraphDef() + meta_graph.ParseFromString(self._meta_graph) + return meta_graph + + def RunMetadata(self, tag): + """Given a tag, return the associated session.run() metadata. + + Args: + tag: A string tag associated with the event. + + Raises: + ValueError: If the tag is not found. + + Returns: + The metadata in form of `RunMetadata` proto. + """ + if tag not in self._tagged_metadata: + raise ValueError("There is no run metadata with this tag name") + + run_metadata = config_pb2.RunMetadata() + run_metadata.ParseFromString(self._tagged_metadata[tag]) + return run_metadata + + def Histograms(self, tag): + """Given a summary tag, return all associated histograms. + + Args: + tag: A string tag associated with the events. 
+ + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `HistogramEvent`s. + """ + return self.histograms.Items(tag) + + def CompressedHistograms(self, tag): + """Given a summary tag, return all associated compressed histograms. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `CompressedHistogramEvent`s. + """ + return self.compressed_histograms.Items(tag) + + def Images(self, tag): + """Given a summary tag, return all associated images. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `ImageEvent`s. + """ + return self.images.Items(tag) + + def Audio(self, tag): + """Given a summary tag, return all associated audio. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `AudioEvent`s. + """ + return self.audios.Items(tag) + + def Tensors(self, tag): + """Given a summary tag, return all associated tensors. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `TensorEvent`s. + """ + return self.tensors.Items(tag) + + def _MaybePurgeOrphanedData(self, event): + """Maybe purge orphaned data due to a TensorFlow crash. + + When TensorFlow crashes at step T+O and restarts at step T, any events + written after step T are now "orphaned" and will be at best misleading if + they are included in TensorBoard. + + This logic attempts to determine if there is orphaned data, and purge it + if it is found. + + Args: + event: The event to use as a reference, to determine if a purge is needed. + """ + if not self.purge_orphaned_data: + return + ## Check if the event happened after a crash, and purge expired tags. + if self.file_version and self.file_version >= 2: + ## If the file_version is recent enough, use the SessionLog enum + ## to check for restarts. + self._CheckForRestartAndMaybePurge(event) + else: + ## If there is no file version, default to old logic of checking for + ## out of order steps. + self._CheckForOutOfOrderStepAndMaybePurge(event) + + def _CheckForRestartAndMaybePurge(self, event): + """Check and discard expired events using SessionLog.START. + + Check for a SessionLog.START event and purge all previously seen events + with larger steps, because they are out of date. Because of supervisor + threading, it is possible that this logic will cause the first few event + messages to be discarded since supervisor threading does not guarantee + that the START message is deterministically written first. + + This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which + can inadvertently discard events due to supervisor threading. + + Args: + event: The event to use as reference. If the event is a START event, all + previously seen events with a greater event.step will be purged. + """ + if ( + event.HasField("session_log") + and event.session_log.status == event_pb2.SessionLog.START + ): + self._Purge(event, by_tags=False) + + def _CheckForOutOfOrderStepAndMaybePurge(self, event): + """Check for out-of-order event.step and discard expired events for + tags. + + Check if the event is out of order relative to the global most recent step. + If it is, purge outdated summaries for tags that the event contains. + + Args: + event: The event to use as reference. 
If the event is out-of-order, all + events with the same tags, but with a greater event.step will be purged. + """ + if event.step < self.most_recent_step and event.HasField("summary"): + self._Purge(event, by_tags=True) + else: + self.most_recent_step = event.step + self.most_recent_wall_time = event.wall_time + + def _ConvertHistogramProtoToPopo(self, histo): + """Converts histogram proto to Python object.""" + return HistogramValue( + min=histo.min, + max=histo.max, + num=histo.num, + sum=histo.sum, + sum_squares=histo.sum_squares, + bucket_limit=list(histo.bucket_limit), + bucket=list(histo.bucket), + ) + + def _ProcessHistogram(self, tag, wall_time, step, histo): + """Processes a proto histogram by adding it to accumulated state.""" + histo = self._ConvertHistogramProtoToPopo(histo) + histo_ev = HistogramEvent(wall_time, step, histo) + self.histograms.AddItem(tag, histo_ev) + self.compressed_histograms.AddItem( + tag, histo_ev, self._CompressHistogram + ) + + def _CompressHistogram(self, histo_ev): + """Callback for _ProcessHistogram.""" + return CompressedHistogramEvent( + histo_ev.wall_time, + histo_ev.step, + compressor.compress_histogram_proto( + histo_ev.histogram_value, self._compression_bps + ), + ) + + def _ProcessImage(self, tag, wall_time, step, image): + """Processes an image by adding it to accumulated state.""" + event = ImageEvent( + wall_time=wall_time, + step=step, + encoded_image_string=image.encoded_image_string, + width=image.width, + height=image.height, + ) + self.images.AddItem(tag, event) + + def _ProcessAudio(self, tag, wall_time, step, audio): + """Processes a audio by adding it to accumulated state.""" + event = AudioEvent( + wall_time=wall_time, + step=step, + encoded_audio_string=audio.encoded_audio_string, + content_type=audio.content_type, + sample_rate=audio.sample_rate, + length_frames=audio.length_frames, + ) + self.audios.AddItem(tag, event) + + def _ProcessScalar(self, tag, wall_time, step, scalar): + """Processes a simple value by adding it to accumulated state.""" + sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar) + self.scalars.AddItem(tag, sv) + + def _ProcessTensor(self, tag, wall_time, step, tensor): + tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor) + self.tensors.AddItem(tag, tv) + + def _Purge(self, event, by_tags): + """Purge all events that have occurred after the given event.step. + + If by_tags is True, purge all events that occurred after the given + event.step, but only for the tags that the event has. Non-sequential + event.steps suggest that a TensorFlow restart occurred, and we discard + the out-of-order events to display a consistent view in TensorBoard. + + Discarding by tags is the safer method, when we are unsure whether a restart + has occurred, given that threading in supervisor can cause events of + different tags to arrive with unsynchronized step values. + + If by_tags is False, then purge all events with event.step greater than the + given event.step. This can be used when we are certain that a TensorFlow + restart has occurred and these events can be discarded. + + Args: + event: The event to use as reference for the purge. All events with + the same tags, but with a greater event.step will be purged. + by_tags: Bool to dictate whether to discard all out-of-order events or + only those that are associated with the given reference event. 
+ """ + ## Keep data in reservoirs that has a step less than event.step + _NotExpired = lambda x: x.step < event.step + + if by_tags: + + def _ExpiredPerTag(value): + return [ + getattr(self, x).FilterItems(_NotExpired, value.tag) + for x in self.accumulated_attrs + ] + + expired_per_tags = [ + _ExpiredPerTag(value) for value in event.summary.value + ] + expired_per_type = [sum(x) for x in zip(*expired_per_tags)] + else: + expired_per_type = [ + getattr(self, x).FilterItems(_NotExpired) + for x in self.accumulated_attrs + ] + + if sum(expired_per_type) > 0: + purge_msg = _GetPurgeMessage( + self.most_recent_step, + self.most_recent_wall_time, + event.step, + event.wall_time, + *expired_per_type, + ) + logger.warning(purge_msg) + + +def _GetPurgeMessage( + most_recent_step, + most_recent_wall_time, + event_step, + event_wall_time, + num_expired_scalars, + num_expired_histos, + num_expired_comp_histos, + num_expired_images, + num_expired_audio, +): + """Return the string message associated with TensorBoard purges.""" + return ( + "Detected out of order event.step likely caused by " + "a TensorFlow restart. Purging expired events from Tensorboard" + " display between the previous step: {} (timestamp: {}) and " + "current step: {} (timestamp: {}). Removing {} scalars, {} " + "histograms, {} compressed histograms, {} images, " + "and {} audio." + ).format( + most_recent_step, + most_recent_wall_time, + event_step, + event_wall_time, + num_expired_scalars, + num_expired_histos, + num_expired_comp_histos, + num_expired_images, + num_expired_audio, + ) + + +def _GeneratorFromPath(path): + """Create an event generator for file or directory at given path string.""" + if not path: + raise ValueError("path must be a valid string") + if io_wrapper.IsSummaryEventsFile(path): + return event_file_loader.LegacyEventFileLoader(path) + else: + return directory_watcher.DirectoryWatcher( + path, + event_file_loader.LegacyEventFileLoader, + io_wrapper.IsSummaryEventsFile, + ) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_inspector.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_inspector.py new file mode 100644 index 0000000000000000000000000000000000000000..3bf426cc52abff97202446e77e22635c62575cdd --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_inspector.py @@ -0,0 +1,465 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Logic for TensorBoard inspector to help humans investigate event files. 
+ +Example usages: +tensorboard --inspect --event_file myevents.out +tensorboard --inspect --event_file myevents.out --tag loss +tensorboard --inspect --logdir mylogdir +tensorboard --inspect --logdir mylogdir --tag loss + + +This script runs over a logdir and creates an InspectionUnit for every +subdirectory with event files. If running over an event file, it creates only +one InspectionUnit. One block of output is printed to console for each +InspectionUnit. + +The primary content of an InspectionUnit is the dict field_to_obs that maps +fields (e.g. "scalar", "histogram", "session_log:start", etc.) to a list of +Observations for the field. Observations correspond one-to-one with Events in an +event file but contain less information because they only store what is +necessary to generate the final console output. + +The final output is rendered to console by applying some aggregating function +to the lists of Observations. Different functions are applied depending on the +type of field. For instance, for "scalar" fields, the inspector shows aggregate +statistics. For other fields like "session_log:start", all observed steps are +printed in order to aid debugging. + + +[1] Query a logdir or an event file for its logged tags and summary statistics +using --logdir or --event_file. + +[[event_file]] contains these tags: +histograms + binary/Sign/Activations + binary/nn_tanh/act/Activations + binary/nn_tanh/biases + binary/nn_tanh/biases:gradient + binary/nn_tanh/weights + binary/nn_tanh/weights:gradient +images + input_images/image/0 + input_images/image/1 + input_images/image/2 +scalars + Learning Rate + Total Cost + Total Cost (raw) + +Debug output aggregated over all tags: +graph + first_step 0 + last_step 0 + max_step 0 + min_step 0 + num_steps 1 + outoforder_steps [] +histograms + first_step 491 + last_step 659823 + max_step 659823 + min_step 491 + num_steps 993 + outoforder_steps [] +images - +scalars + first_step 0 + last_step 659823 + max_step 659823 + min_step 0 + num_steps 1985 + outoforder_steps [] +sessionlog:checkpoint + first_step 7129 + last_step 657167 + max_step 657167 + min_step 7129 + num_steps 99 + outoforder_steps [] +sessionlog:start + outoforder_steps [] + steps [0L] +sessionlog:stop - + + +[2] Drill down into a particular tag using --tag. + +Debug output for binary/Sign/Activations: +histograms + first_step 491 + last_step 659823 + max_step 659823 + min_step 491 + num_steps 993 + outoforder_steps [] +""" + + +import dataclasses +import itertools +import os + +from typing import Any, Generator, Mapping + +from tensorboard.backend.event_processing import event_accumulator +from tensorboard.backend.event_processing import event_file_loader +from tensorboard.backend.event_processing import io_wrapper +from tensorboard.compat import tf +from tensorboard.compat.proto import event_pb2 + + +# Map of field names within summary.proto to the user-facing names that this +# script outputs. +SUMMARY_TYPE_TO_FIELD = { + "simple_value": "scalars", + "histo": "histograms", + "image": "images", + "audio": "audio", +} +for summary_type in event_accumulator.SUMMARY_TYPES: + if summary_type not in SUMMARY_TYPE_TO_FIELD: + SUMMARY_TYPE_TO_FIELD[summary_type] = summary_type + +# Types of summaries that we may want to query for by tag. +TAG_FIELDS = list(SUMMARY_TYPE_TO_FIELD.values()) + +# Summaries that we want to see every instance of. 
+LONG_FIELDS = ["sessionlog:start", "sessionlog:stop"] + +# Summaries that we only want an abridged digest of, since they would +# take too much screen real estate otherwise. +SHORT_FIELDS = ["graph", "sessionlog:checkpoint"] + TAG_FIELDS + +# All summary types that we can inspect. +TRACKED_FIELDS = SHORT_FIELDS + LONG_FIELDS + +PRINT_SEPARATOR = "=" * 70 + "\n" + + +@dataclasses.dataclass(frozen=True) +class Observation: + """Contains the data within each Event file that the inspector cares about. + + The inspector accumulates Observations as it processes events. + + Attributes: + step: Global step of the event. + wall_time: Timestamp of the event in seconds. + tag: Tag name associated with the event. + """ + + step: int + wall_time: float + tag: str + + +@dataclasses.dataclass(frozen=True) +class InspectionUnit: + """Created for each organizational structure in the event files. + + An InspectionUnit is visible in the final terminal output. For instance, one + InspectionUnit is created for each subdirectory in logdir. When asked to inspect + a single event file, there may only be one InspectionUnit. + + Attributes: + name: Name of the organizational unit that will be printed to console. + generator: A generator that yields `Event` protos. + field_to_obs: A mapping from string fields to `Observations` that the inspector + creates. + """ + + name: str + generator: Generator[event_pb2.Event, Any, Any] + field_to_obs: Mapping[str, Observation] + + +def get_field_to_observations_map(generator, query_for_tag=""): + """Return a field to `Observations` dict for the event generator. + + Args: + generator: A generator over event protos. + query_for_tag: A string that if specified, only create observations for + events with this tag name. + + Returns: + A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list. + """ + + def increment(stat, event, tag=""): + assert stat in TRACKED_FIELDS + field_to_obs[stat].append( + dataclasses.asdict( + Observation(step=event.step, wall_time=event.wall_time, tag=tag) + ) + ) + + field_to_obs = dict([(t, []) for t in TRACKED_FIELDS]) + + for event in generator: + ## Process the event + if event.HasField("graph_def") and (not query_for_tag): + increment("graph", event) + if event.HasField("session_log") and (not query_for_tag): + status = event.session_log.status + if status == event_pb2.SessionLog.START: + increment("sessionlog:start", event) + elif status == event_pb2.SessionLog.STOP: + increment("sessionlog:stop", event) + elif status == event_pb2.SessionLog.CHECKPOINT: + increment("sessionlog:checkpoint", event) + elif event.HasField("summary"): + for value in event.summary.value: + if query_for_tag and value.tag != query_for_tag: + continue + + for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items(): + if value.HasField(proto_name): + increment(display_name, event, value.tag) + return field_to_obs + + +def get_unique_tags(field_to_obs): + """Returns a dictionary of tags that a user could query over. + + Args: + field_to_obs: Dict that maps string field to `Observation` list. + + Returns: + A dict that maps keys in `TAG_FIELDS` to a list of string tags present in + the event files. If the dict does not have any observations of the type, + maps to an empty list so that we can render this to console. + """ + return { + field: sorted(set([x.get("tag", "") for x in observations])) + for field, observations in field_to_obs.items() + if field in TAG_FIELDS + } + + +def print_dict(d, show_missing=True): + """Prints a shallow dict to console. 
+ + Args: + d: Dict to print. + show_missing: Whether to show keys with empty values. + """ + for k, v in sorted(d.items()): + if (not v) and show_missing: + # No instances of the key, so print missing symbol. + print("{} -".format(k)) + elif isinstance(v, list): + # Value is a list, so print each item of the list. + print(k) + for item in v: + print(" {}".format(item)) + elif isinstance(v, dict): + # Value is a dict, so print each (key, value) pair of the dict. + print(k) + for kk, vv in sorted(v.items()): + print(" {:<20} {}".format(kk, vv)) + + +def get_dict_to_print(field_to_obs): + """Transform the field-to-obs mapping into a printable dictionary. + + Args: + field_to_obs: Dict that maps string field to `Observation` list. + + Returns: + A dict with the keys and values to print to console. + """ + + def compressed_steps(steps): + return { + "num_steps": len(set(steps)), + "min_step": min(steps), + "max_step": max(steps), + "last_step": steps[-1], + "first_step": steps[0], + "outoforder_steps": get_out_of_order(steps), + } + + def full_steps(steps): + return {"steps": steps, "outoforder_steps": get_out_of_order(steps)} + + output = {} + for field, observations in field_to_obs.items(): + if not observations: + output[field] = None + continue + + steps = [x["step"] for x in observations] + if field in SHORT_FIELDS: + output[field] = compressed_steps(steps) + if field in LONG_FIELDS: + output[field] = full_steps(steps) + + return output + + +def get_out_of_order(list_of_numbers): + """Returns elements that break the monotonically non-decreasing trend. + + This is used to find instances of global step values that are "out-of-order", + which may trigger TensorBoard event discarding logic. + + Args: + list_of_numbers: A list of numbers. + + Returns: + A list of tuples in which each tuple are two elements are adjacent, but the + second element is lower than the first. + """ + # TODO: Consider changing this to only check for out-of-order + # steps within a particular tag. + result = [] + # pylint: disable=consider-using-enumerate + for i in range(len(list_of_numbers)): + if i == 0: + continue + if list_of_numbers[i] < list_of_numbers[i - 1]: + result.append((list_of_numbers[i - 1], list_of_numbers[i])) + return result + + +def generators_from_logdir(logdir): + """Returns a list of event generators for subdirectories with event files. + + The number of generators returned should equal the number of directories + within logdir that contain event files. If only logdir contains event files, + returns a list of length one. + + Args: + logdir: A log directory that contains event files. + + Returns: + List of event generators for each subdirectory with event files. + """ + subdirs = io_wrapper.GetLogdirSubdirectories(logdir) + generators = [ + itertools.chain( + *[ + generator_from_event_file(os.path.join(subdir, f)) + for f in tf.io.gfile.listdir(subdir) + if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f)) + ] + ) + for subdir in subdirs + ] + return generators + + +def generator_from_event_file(event_file): + """Returns a generator that yields events from an event file.""" + return event_file_loader.LegacyEventFileLoader(event_file).Load() + + +def get_inspection_units(logdir="", event_file="", tag=""): + """Returns a list of InspectionUnit objects given either logdir or + event_file. + + If logdir is given, the number of InspectionUnits should equal the + number of directories or subdirectories that contain event files. 
+ + If event_file is given, the number of InspectionUnits should be 1. + + Args: + logdir: A log directory that contains event files. + event_file: Or, a particular event file path. + tag: An optional tag name to query for. + + Returns: + A list of InspectionUnit objects. + """ + if logdir: + subdirs = io_wrapper.GetLogdirSubdirectories(logdir) + inspection_units = [] + for subdir in subdirs: + generator = itertools.chain( + *[ + generator_from_event_file(os.path.join(subdir, f)) + for f in tf.io.gfile.listdir(subdir) + if io_wrapper.IsTensorFlowEventsFile( + os.path.join(subdir, f) + ) + ] + ) + inspection_units.append( + InspectionUnit( + name=subdir, + generator=generator, + field_to_obs=get_field_to_observations_map(generator, tag), + ) + ) + if inspection_units: + print( + "Found event files in:\n{}\n".format( + "\n".join([u.name for u in inspection_units]) + ) + ) + elif io_wrapper.IsTensorFlowEventsFile(logdir): + print( + "It seems that {} may be an event file instead of a logdir. If this " + "is the case, use --event_file instead of --logdir to pass " + "it in.".format(logdir) + ) + else: + print("No event files found within logdir {}".format(logdir)) + return inspection_units + elif event_file: + generator = generator_from_event_file(event_file) + return [ + InspectionUnit( + name=event_file, + generator=generator, + field_to_obs=get_field_to_observations_map(generator, tag), + ) + ] + return [] + + +def inspect(logdir="", event_file="", tag=""): + """Main function for inspector that prints out a digest of event files. + + Args: + logdir: A log directory that contains event files. + event_file: Or, a particular event file path. + tag: An optional tag name to query for. + + Raises: + ValueError: If neither logdir and event_file are given, or both are given. + """ + print( + PRINT_SEPARATOR + + "Processing event files... (this can take a few minutes)\n" + + PRINT_SEPARATOR + ) + inspection_units = get_inspection_units(logdir, event_file, tag) + + for unit in inspection_units: + if tag: + print("Event statistics for tag {} in {}:".format(tag, unit.name)) + else: + # If the user is not inspecting a particular tag, also print the list of + # all available tags that they can query. + print("These tags are in {}:".format(unit.name)) + print_dict(get_unique_tags(unit.field_to_obs)) + print(PRINT_SEPARATOR) + print("Event statistics for {}:".format(unit.name)) + + print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag)) + print(PRINT_SEPARATOR) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_loader.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..38c626ca287e5c1a079b93723a76abe77257a56d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_loader.py @@ -0,0 +1,293 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functionality for loading events from a record file.""" + +import contextlib + +from tensorboard import data_compat +from tensorboard import dataclass_compat +from tensorboard.compat import tf +from tensorboard.compat.proto import event_pb2 +from tensorboard.util import platform_util +from tensorboard.util import tb_logging + + +logger = tb_logging.get_logger() + + +@contextlib.contextmanager +def _nullcontext(): + """Pre-Python-3.7-compatible standin for contextlib.nullcontext.""" + yield + + +# Might as well make this a singleton. +_NULLCONTEXT = _nullcontext() + + +def _silence_deprecation_warnings(): + """Context manager that best-effort silences TF deprecation warnings.""" + try: + # Learn this one weird trick to make TF deprecation warnings go away. + from tensorflow.python.util import deprecation + + return deprecation.silence() + except (ImportError, AttributeError): + return _NULLCONTEXT + + +def _make_tf_record_iterator(file_path): + """Returns an iterator over TF records for the given tfrecord file.""" + # If we don't have TF at all, use the stub implementation. + if tf.__version__ == "stub": + # TODO(#1711): Reshape stub implementation to fit tf_record_iterator API + # rather than needlessly emulating the old PyRecordReader_New API. + logger.debug("Opening a stub record reader pointing at %s", file_path) + return _PyRecordReaderIterator( + tf.pywrap_tensorflow.PyRecordReader_New, file_path + ) + # If PyRecordReader exists, use it, otherwise use tf_record_iterator(). + # Check old first, then new, since tf_record_iterator existed previously but + # only gained the semantics we need at the time PyRecordReader was removed. + # + # TODO(#1711): Eventually remove PyRecordReader fallback once we can drop + # support for TF 2.1 and prior, and find a non-deprecated replacement for + # tf.compat.v1.io.tf_record_iterator. + try: + from tensorflow.python import pywrap_tensorflow + + py_record_reader_new = pywrap_tensorflow.PyRecordReader_New + except (ImportError, AttributeError): + py_record_reader_new = None + if py_record_reader_new: + logger.debug("Opening a PyRecordReader pointing at %s", file_path) + return _PyRecordReaderIterator(py_record_reader_new, file_path) + else: + logger.debug("Opening a tf_record_iterator pointing at %s", file_path) + # TODO(#1711): Find non-deprecated replacement for tf_record_iterator. + with _silence_deprecation_warnings(): + return tf.compat.v1.io.tf_record_iterator(file_path) + + +class _PyRecordReaderIterator: + """Python iterator for TF Records based on PyRecordReader.""" + + def __init__(self, py_record_reader_new, file_path): + """Constructs a _PyRecordReaderIterator for the given file path. 
+ + Args: + py_record_reader_new: pywrap_tensorflow.PyRecordReader_New + file_path: file path of the tfrecord file to read + """ + with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status: + self._reader = py_record_reader_new( + tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(""), status + ) + if not self._reader: + raise IOError( + "Failed to open a record reader pointing to %s" % file_path + ) + + def __iter__(self): + return self + + def __next__(self): + try: + self._reader.GetNext() + except tf.errors.OutOfRangeError as e: + raise StopIteration + return self._reader.record() + + next = __next__ # for python2 compatibility + + +class RawEventFileLoader: + """An iterator that yields Event protos as serialized bytestrings.""" + + def __init__(self, file_path, detect_file_replacement=False): + """Constructs a RawEventFileLoader for the given file path. + + Args: + file_path: the event file path to read from + detect_file_replacement: if True, when Load() is called, the loader + will make a stat() call to check the size of the file. If it sees + that the file has grown, it will reopen the file entirely (while + preserving the current offset) before attempting to read from it. + Otherwise, Load() will simply poll at EOF for new data. + """ + if file_path is None: + raise ValueError("A file path is required") + self._file_path = platform_util.readahead_file_path(file_path) + self._detect_file_replacement = detect_file_replacement + self._file_size = None + self._iterator = _make_tf_record_iterator(self._file_path) + if self._detect_file_replacement and not hasattr( + self._iterator, "reopen" + ): + logger.warning( + "File replacement detection requested, but not enabled because " + "TF record iterator impl does not support reopening. This " + "functionality requires TensorFlow 2.9+" + ) + self._detect_file_replacement = False + + def Load(self): + """Loads all new events from disk as raw serialized proto bytestrings. + + Calling Load multiple times in a row will not 'drop' events as long as the + return value is not iterated over. + + Yields: + All event proto bytestrings in the file that have not been yielded yet. + """ + logger.debug("Loading events from %s", self._file_path) + if self._detect_file_replacement: + has_increased = self.CheckForIncreasedFileSize() + # Only act on the file size information if we got a concrete result. + if has_increased is not None: + if has_increased: + logger.debug( + "Reopening %s since file size has changed", + self._file_path, + ) + self._iterator.close() + self._iterator.reopen() + else: + logger.debug( + "Skipping attempt to poll %s since file size has not " + "changed (still %d)", + self._file_path, + self._file_size, + ) + return + while True: + try: + yield next(self._iterator) + except StopIteration: + logger.debug("End of file in %s", self._file_path) + break + except tf.errors.DataLossError as e: + # We swallow partial read exceptions; if the record was truncated + # and a later update completes it, retrying can then resume from + # the same point in the file since the iterator holds the offset. + logger.debug("Truncated record in %s (%s)", self._file_path, e) + break + logger.debug("No more events in %s", self._file_path) + + def CheckForIncreasedFileSize(self): + """Stats the file to get its updated size, returning True if it grew. + + If the stat call fails or reports a smaller size than was previously + seen, then any previously cached size is left unchanged. 
+ + Returns: + boolean or None: True if the file size increased; False if it was + the same or decreased; or None if neither case could be detected + (either because the previous size had not been recorded yet, or + because the stat call for the current size failed). + """ + previous_size = self._file_size + try: + self._file_size = tf.io.gfile.stat(self._file_path).length + except tf.errors.OpError as e: + logger.error("Failed to stat %s: %s", self._file_path, e) + return None + logger.debug( + "Stat on %s got size %d, previous size %s", + self._file_path, + self._file_size, + previous_size, + ) + if previous_size is None: + return None + if self._file_size > previous_size: + return True + if self._file_size < previous_size: + logger.warning( + "File %s shrank from previous size %d to size %d", + self._file_path, + previous_size, + self._file_size, + ) + # In case this was transient, preserve the previously cached size, + # to avoid reporting a spurious increase next time. If the file was + # actually truncated, we can't recover anyway, so just ignore it. + self._file_size = previous_size + return False + + +class LegacyEventFileLoader(RawEventFileLoader): + """An iterator that yields parsed Event protos.""" + + def Load(self): + """Loads all new events from disk. + + Calling Load multiple times in a row will not 'drop' events as long as the + return value is not iterated over. + + Yields: + All events in the file that have not been yielded yet. + """ + for record in super().Load(): + yield event_pb2.Event.FromString(record) + + +class EventFileLoader(LegacyEventFileLoader): + """An iterator that passes events through read-time compat layers. + + Specifically, this includes `data_compat` and `dataclass_compat`. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Track initial metadata for each tag, for `dataclass_compat`. + # This is meant to be tracked per run, not per event file, so + # there is a potential failure case when the second event file + # in a single run has no summary metadata. This only occurs when + # all of the following hold: (a) the events were written with + # the TensorFlow 1.x (not 2.x) writer, (b) the summaries were + # created by `tensorboard.summary.v1` ops and so do not undergo + # `data_compat` transformation, and (c) the file writer was + # reopened by calling `.reopen()` on it, which creates a new + # file but does not clear the tag cache. This is considered + # sufficiently improbable that we don't take extra mitigations. + self._initial_metadata = {} # from tag name to `SummaryMetadata` + + def Load(self): + for event in super().Load(): + event = data_compat.migrate_event(event) + events = dataclass_compat.migrate_event( + event, self._initial_metadata + ) + for event in events: + yield event + + +class TimestampedEventFileLoader(EventFileLoader): + """An iterator that yields (UNIX timestamp float, Event proto) pairs.""" + + def Load(self): + """Loads all new events and their wall time values from disk. + + Calling Load multiple times in a row will not 'drop' events as long as the + return value is not iterated over. + + Yields: + Pairs of (UNIX timestamp float, Event proto) for all events in the file + that have not been yielded yet. 
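+
+        For example, to print the wall time and step of every event in a file
+        (the file path below is just an illustration):
+
+        ```python
+        from tensorboard.backend.event_processing import event_file_loader
+
+        loader = event_file_loader.TimestampedEventFileLoader(
+            "events.out.tfevents.example"
+        )
+        for wall_time, event in loader.Load():
+            print(wall_time, event.step)
+        ```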
+ """ + for event in super().Load(): + yield (event.wall_time, event) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_multiplexer.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_multiplexer.py new file mode 100644 index 0000000000000000000000000000000000000000..9aa3bafe1e9a7999666047f493c565e10893d0a2 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_multiplexer.py @@ -0,0 +1,523 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides an interface for working with multiple event files.""" + + +import os +import threading + +from typing import Optional + +from tensorboard.backend.event_processing import directory_watcher +from tensorboard.backend.event_processing import event_accumulator +from tensorboard.backend.event_processing import io_wrapper +from tensorboard.util import tb_logging + + +logger = tb_logging.get_logger() + + +class EventMultiplexer: + """An `EventMultiplexer` manages access to multiple `EventAccumulator`s. + + Each `EventAccumulator` is associated with a `run`, which is a self-contained + TensorFlow execution. The `EventMultiplexer` provides methods for extracting + information about events from multiple `run`s. + + Example usage for loading specific runs from files: + + ```python + x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'}) + x.Reload() + ``` + + Example usage for loading a directory where each subdirectory is a run + + ```python + (eg:) /parent/directory/path/ + /parent/directory/path/run1/ + /parent/directory/path/run1/events.out.tfevents.1001 + /parent/directory/path/run1/events.out.tfevents.1002 + + /parent/directory/path/run2/ + /parent/directory/path/run2/events.out.tfevents.9232 + + /parent/directory/path/run3/ + /parent/directory/path/run3/events.out.tfevents.9232 + x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path') + (which is equivalent to:) + x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...} + ``` + + If you would like to watch `/parent/directory/path`, wait for it to be created + (if necessary) and then periodically pick up new runs, use + `AutoloadingMultiplexer` + @@Tensors + """ + + def __init__( + self, run_path_map=None, size_guidance=None, purge_orphaned_data=True + ): + """Constructor for the `EventMultiplexer`. + + Args: + run_path_map: Dict `{run: path}` which specifies the + name of a run, and the path to find the associated events. If it is + None, then the EventMultiplexer initializes without any runs. + size_guidance: A dictionary mapping from `tagType` to the number of items + to store for each tag of that type. See + `event_accumulator.EventAccumulator` for details. + purge_orphaned_data: Whether to discard any events that were "orphaned" by + a TensorFlow restart. 
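+
+        A partial `size_guidance` may be passed to override the per-tag-type
+        limits; for example (the run paths and the scalar limit below are
+        illustrative):
+
+        ```python
+        from tensorboard.backend.event_processing import event_accumulator
+        from tensorboard.backend.event_processing import event_multiplexer
+
+        multiplexer = event_multiplexer.EventMultiplexer(
+            run_path_map={"train": "runs/train", "eval": "runs/eval"},
+            size_guidance={event_accumulator.SCALARS: 500},
+        )
+        multiplexer.Reload()
+        ```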
+ """ + logger.info("Event Multiplexer initializing.") + self._accumulators_mutex = threading.Lock() + self._accumulators = {} + self._paths = {} + self._reload_called = False + self._size_guidance = ( + size_guidance or event_accumulator.DEFAULT_SIZE_GUIDANCE + ) + self.purge_orphaned_data = purge_orphaned_data + if run_path_map is not None: + logger.info( + "Event Multplexer doing initialization load for %s", + run_path_map, + ) + for run, path in run_path_map.items(): + self.AddRun(path, run) + logger.info("Event Multiplexer done initializing") + + def AddRun(self, path, name=None): + """Add a run to the multiplexer. + + If the name is not specified, it is the same as the path. + + If a run by that name exists, and we are already watching the right path, + do nothing. If we are watching a different path, replace the event + accumulator. + + If `Reload` has been called, it will `Reload` the newly created + accumulators. + + Args: + path: Path to the event files (or event directory) for given run. + name: Name of the run to add. If not provided, is set to path. + + Returns: + The `EventMultiplexer`. + """ + name = name or path + accumulator = None + with self._accumulators_mutex: + if name not in self._accumulators or self._paths[name] != path: + if name in self._paths and self._paths[name] != path: + # TODO(@decentralion) - Make it impossible to overwrite an old path + # with a new path (just give the new path a distinct name) + logger.warning( + "Conflict for name %s: old path %s, new path %s", + name, + self._paths[name], + path, + ) + logger.info("Constructing EventAccumulator for %s", path) + accumulator = event_accumulator.EventAccumulator( + path, + size_guidance=self._size_guidance, + purge_orphaned_data=self.purge_orphaned_data, + ) + self._accumulators[name] = accumulator + self._paths[name] = path + if accumulator: + if self._reload_called: + accumulator.Reload() + return self + + def AddRunsFromDirectory(self, path, name=None): + """Load runs from a directory; recursively walks subdirectories. + + If path doesn't exist, no-op. This ensures that it is safe to call + `AddRunsFromDirectory` multiple times, even before the directory is made. + + If path is a directory, load event files in the directory (if any exist) and + recursively call AddRunsFromDirectory on any subdirectories. This mean you + can call AddRunsFromDirectory at the root of a tree of event logs and + TensorBoard will load them all. + + If the `EventMultiplexer` is already loaded this will cause + the newly created accumulators to `Reload()`. + Args: + path: A string path to a directory to load runs from. + name: Optionally, what name to apply to the runs. If name is provided + and the directory contains run subdirectories, the name of each subrun + is the concatenation of the parent name and the subdirectory name. If + name is provided and the directory contains event files, then a run + is added called "name" and with the events from the path. + + Raises: + ValueError: If the path exists and isn't a directory. + + Returns: + The `EventMultiplexer`. 
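+
+        For example, reusing the directory layout from the class docstring:
+
+        ```python
+        x = EventMultiplexer()
+        x.AddRunsFromDirectory("/parent/directory/path")
+        x.Reload()
+        print(x.Runs())  # {'run1': {...}, 'run2': {...}, 'run3': {...}}
+        ```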
+ """ + logger.info("Starting AddRunsFromDirectory: %s", path) + for subdir in io_wrapper.GetLogdirSubdirectories(path): + logger.info("Adding events from directory %s", subdir) + rpath = os.path.relpath(subdir, path) + subname = os.path.join(name, rpath) if name else rpath + self.AddRun(subdir, name=subname) + logger.info("Done with AddRunsFromDirectory: %s", path) + return self + + def Reload(self): + """Call `Reload` on every `EventAccumulator`.""" + logger.info("Beginning EventMultiplexer.Reload()") + self._reload_called = True + # Build a list so we're safe even if the list of accumulators is modified + # even while we're reloading. + with self._accumulators_mutex: + items = list(self._accumulators.items()) + + names_to_delete = set() + for name, accumulator in items: + try: + accumulator.Reload() + except (OSError, IOError) as e: + logger.error("Unable to reload accumulator '%s': %s", name, e) + except directory_watcher.DirectoryDeletedError: + names_to_delete.add(name) + + with self._accumulators_mutex: + for name in names_to_delete: + logger.warning("Deleting accumulator '%s'", name) + del self._accumulators[name] + logger.info("Finished with EventMultiplexer.Reload()") + return self + + def PluginAssets(self, plugin_name): + """Get index of runs and assets for a given plugin. + + Args: + plugin_name: Name of the plugin we are checking for. + + Returns: + A dictionary that maps from run_name to a list of plugin + assets for that run. + """ + with self._accumulators_mutex: + # To avoid nested locks, we construct a copy of the run-accumulator map + items = list(self._accumulators.items()) + + return {run: accum.PluginAssets(plugin_name) for run, accum in items} + + def RetrievePluginAsset(self, run, plugin_name, asset_name): + """Return the contents for a specific plugin asset from a run. + + Args: + run: The string name of the run. + plugin_name: The string name of a plugin. + asset_name: The string name of an asset. + + Returns: + The string contents of the plugin asset. + + Raises: + KeyError: If the asset is not available. + """ + accumulator = self.GetAccumulator(run) + return accumulator.RetrievePluginAsset(plugin_name, asset_name) + + def FirstEventTimestamp(self, run): + """Return the timestamp of the first event of the given run. + + This may perform I/O if no events have been loaded yet for the run. + + Args: + run: A string name of the run for which the timestamp is retrieved. + + Returns: + The wall_time of the first event of the run, which will typically be + seconds since the epoch. + + Raises: + KeyError: If the run is not found. + ValueError: If the run has no events loaded and there are no events on + disk to load. + """ + accumulator = self.GetAccumulator(run) + return accumulator.FirstEventTimestamp() + + def GetSourceWriter(self, run) -> Optional[str]: + """Returns the source writer name from the first event of the given run. + + Assuming each run has only one source writer. + + Args: + run: A string name of the run from which the event source information + is retrieved. + + Returns: + Name of the writer that wrote the events in the run. + """ + accumulator = self.GetAccumulator(run) + return accumulator.GetSourceWriter() + + def Scalars(self, run, tag): + """Retrieve the scalar events associated with a run and tag. + + Args: + run: A string name of the run for which values are retrieved. + tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. 
+ + Returns: + An array of `event_accumulator.ScalarEvents`. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Scalars(tag) + + def Graph(self, run): + """Retrieve the graph associated with the provided run. + + Args: + run: A string name of a run to load the graph for. + + Raises: + KeyError: If the run is not found. + ValueError: If the run does not have an associated graph. + + Returns: + The `GraphDef` protobuf data structure. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Graph() + + def SerializedGraph(self, run): + """Retrieve the serialized graph associated with the provided run. + + Args: + run: A string name of a run to load the graph for. + + Raises: + KeyError: If the run is not found. + ValueError: If the run does not have an associated graph. + + Returns: + The serialized form of the `GraphDef` protobuf data structure. + """ + accumulator = self.GetAccumulator(run) + return accumulator.SerializedGraph() + + def MetaGraph(self, run): + """Retrieve the metagraph associated with the provided run. + + Args: + run: A string name of a run to load the graph for. + + Raises: + KeyError: If the run is not found. + ValueError: If the run does not have an associated graph. + + Returns: + The `MetaGraphDef` protobuf data structure. + """ + accumulator = self.GetAccumulator(run) + return accumulator.MetaGraph() + + def RunMetadata(self, run, tag): + """Get the session.run() metadata associated with a TensorFlow run and + tag. + + Args: + run: A string name of a TensorFlow run. + tag: A string name of the tag associated with a particular session.run(). + + Raises: + KeyError: If the run is not found, or the tag is not available for the + given run. + + Returns: + The metadata in the form of `RunMetadata` protobuf data structure. + """ + accumulator = self.GetAccumulator(run) + return accumulator.RunMetadata(tag) + + def Histograms(self, run, tag): + """Retrieve the histogram events associated with a run and tag. + + Args: + run: A string name of the run for which values are retrieved. + tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + An array of `event_accumulator.HistogramEvents`. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Histograms(tag) + + def CompressedHistograms(self, run, tag): + """Retrieve the compressed histogram events associated with a run and + tag. + + Args: + run: A string name of the run for which values are retrieved. + tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + An array of `event_accumulator.CompressedHistogramEvents`. + """ + accumulator = self.GetAccumulator(run) + return accumulator.CompressedHistograms(tag) + + def Images(self, run, tag): + """Retrieve the image events associated with a run and tag. + + Args: + run: A string name of the run for which values are retrieved. + tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + An array of `event_accumulator.ImageEvents`. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Images(tag) + + def Audio(self, run, tag): + """Retrieve the audio events associated with a run and tag. + + Args: + run: A string name of the run for which values are retrieved. 
+ tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + An array of `event_accumulator.AudioEvents`. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Audio(tag) + + def Tensors(self, run, tag): + """Retrieve the tensor events associated with a run and tag. + + Args: + run: A string name of the run for which values are retrieved. + tag: A string name of the tag for which values are retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + An array of `event_accumulator.TensorEvent`s. + """ + accumulator = self.GetAccumulator(run) + return accumulator.Tensors(tag) + + def PluginRunToTagToContent(self, plugin_name): + """Returns a 2-layer dictionary of the form {run: {tag: content}}. + + The `content` referred above is the content field of the PluginData proto + for the specified plugin within a Summary.Value proto. + + Args: + plugin_name: The name of the plugin for which to fetch content. + + Returns: + A dictionary of the form {run: {tag: content}}. + """ + mapping = {} + for run in self.Runs(): + try: + tag_to_content = self.GetAccumulator(run).PluginTagToContent( + plugin_name + ) + except KeyError: + # This run lacks content for the plugin. Try the next run. + continue + mapping[run] = tag_to_content + return mapping + + def SummaryMetadata(self, run, tag): + """Return the summary metadata for the given tag on the given run. + + Args: + run: A string name of the run for which summary metadata is to be + retrieved. + tag: A string name of the tag whose summary metadata is to be + retrieved. + + Raises: + KeyError: If the run is not found, or the tag is not available for + the given run. + + Returns: + A `SummaryMetadata` protobuf. + """ + accumulator = self.GetAccumulator(run) + return accumulator.SummaryMetadata(tag) + + def Runs(self): + """Return all the run names in the `EventMultiplexer`. + + Returns: + ``` + {runName: { images: [tag1, tag2, tag3], + scalarValues: [tagA, tagB, tagC], + histograms: [tagX, tagY, tagZ], + compressedHistograms: [tagX, tagY, tagZ], + graph: true, meta_graph: true}} + ``` + """ + with self._accumulators_mutex: + # To avoid nested locks, we construct a copy of the run-accumulator map + items = list(self._accumulators.items()) + return {run_name: accumulator.Tags() for run_name, accumulator in items} + + def RunPaths(self): + """Returns a dict mapping run names to event file paths.""" + return self._paths + + def GetAccumulator(self, run): + """Returns EventAccumulator for a given run. + + Args: + run: String name of run. + + Returns: + An EventAccumulator object. + + Raises: + KeyError: If run does not exist. + """ + with self._accumulators_mutex: + return self._accumulators[run] diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_util.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_util.py new file mode 100644 index 0000000000000000000000000000000000000000..62b8481c019b4d52dff4e550f0413efa1cb8799e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_util.py @@ -0,0 +1,68 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functionality for processing events.""" + +from typing import Optional + +from tensorboard.compat.proto import event_pb2 +from tensorboard.util import tb_logging + +logger = tb_logging.get_logger() + +# Maxmimum length for event writer name. +_MAX_WRITER_NAME_LEN = 128 + + +def ParseFileVersion(file_version: str) -> float: + """Convert the string file_version in event.proto into a float. + + Args: + file_version: String file_version from event.proto + + Returns: + Version number as a float. + """ + tokens = file_version.split("brain.Event:") + try: + return float(tokens[-1]) + except ValueError: + ## This should never happen according to the definition of file_version + ## specified in event.proto. + logger.warning( + ( + "Invalid event.proto file_version. Defaulting to use of " + "out-of-order event.step logic for purging expired events." + ) + ) + return -1 + + +def GetSourceWriter( + source_metadata: event_pb2.SourceMetadata, +) -> Optional[str]: + """Gets the source writer name from the source metadata proto.""" + writer_name = source_metadata.writer + if not writer_name: + return None + # Checks the length of the writer name. + if len(writer_name) > _MAX_WRITER_NAME_LEN: + logger.error( + "Source writer name `%s` is too long, maximum allowed length is %d.", + writer_name, + _MAX_WRITER_NAME_LEN, + ) + return None + return writer_name diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_asset_util.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_asset_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b0d224170058226ce0f622e055ccc943776e0b5e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_asset_util.py @@ -0,0 +1,105 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Load plugin assets from disk.""" + +import os.path + +from tensorboard.compat import tf + + +_PLUGINS_DIR = "plugins" + + +def _IsDirectory(parent, item): + """Helper that returns if parent/item is a directory.""" + return tf.io.gfile.isdir(os.path.join(parent, item)) + + +def PluginDirectory(logdir, plugin_name): + """Returns the plugin directory for plugin_name.""" + return os.path.join(logdir, _PLUGINS_DIR, plugin_name) + + +def ListPlugins(logdir): + """List all the plugins that have registered assets in logdir. 
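+
+    Registered plugin assets are expected to live under
+    `<logdir>/plugins/<plugin_name>/`.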
+ + If the plugins_dir does not exist, it returns an empty list. This maintains + compatibility with old directories that have no plugins written. + + Args: + logdir: A directory that was created by a TensorFlow events writer. + + Returns: + a list of plugin names, as strings + """ + plugins_dir = os.path.join(logdir, _PLUGINS_DIR) + try: + entries = tf.io.gfile.listdir(plugins_dir) + except tf.errors.NotFoundError: + return [] + # Strip trailing slashes, which listdir() includes for some filesystems + # for subdirectories, after using them to bypass IsDirectory(). + return [ + x.rstrip("/") + for x in entries + if x.endswith("/") or _IsDirectory(plugins_dir, x) + ] + + +def ListAssets(logdir, plugin_name): + """List all the assets that are available for given plugin in a logdir. + + Args: + logdir: A directory that was created by a TensorFlow summary.FileWriter. + plugin_name: A string name of a plugin to list assets for. + + Returns: + A string list of available plugin assets. If the plugin subdirectory does + not exist (either because the logdir doesn't exist, or because the plugin + didn't register) an empty list is returned. + """ + plugin_dir = PluginDirectory(logdir, plugin_name) + try: + # Strip trailing slashes, which listdir() includes for some filesystems. + return [x.rstrip("/") for x in tf.io.gfile.listdir(plugin_dir)] + except tf.errors.NotFoundError: + return [] + + +def RetrieveAsset(logdir, plugin_name, asset_name): + """Retrieve a particular plugin asset from a logdir. + + Args: + logdir: A directory that was created by a TensorFlow summary.FileWriter. + plugin_name: The plugin we want an asset from. + asset_name: The name of the requested asset. + + Returns: + string contents of the plugin asset. + + Raises: + KeyError: if the asset does not exist. + """ + + asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name) + try: + with tf.io.gfile.GFile(asset_path, "r") as f: + return f.read() + except tf.errors.NotFoundError: + raise KeyError("Asset path %s not found" % asset_path) + except tf.errors.OpError as e: + raise KeyError( + "Couldn't read asset path: %s, OpError %s" % (asset_path, e) + ) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_event_accumulator.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_event_accumulator.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7f6475ef98c69ffe34edf5f42c1c58cf386ec9 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_event_accumulator.py @@ -0,0 +1,722 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Takes a generator of values, and accumulates them for a frontend.""" + +import collections +import dataclasses +import threading + +from typing import Optional + +from tensorboard.backend.event_processing import directory_loader +from tensorboard.backend.event_processing import directory_watcher +from tensorboard.backend.event_processing import event_file_loader +from tensorboard.backend.event_processing import event_util +from tensorboard.backend.event_processing import io_wrapper +from tensorboard.backend.event_processing import plugin_asset_util +from tensorboard.backend.event_processing import reservoir +from tensorboard.backend.event_processing import tag_types +from tensorboard.compat.proto import config_pb2 +from tensorboard.compat.proto import event_pb2 +from tensorboard.compat.proto import graph_pb2 +from tensorboard.compat.proto import meta_graph_pb2 +from tensorboard.compat.proto import tensor_pb2 +from tensorboard.util import tb_logging + + +logger = tb_logging.get_logger() + + +# Legacy aliases +TENSORS = tag_types.TENSORS +GRAPH = tag_types.GRAPH +META_GRAPH = tag_types.META_GRAPH +RUN_METADATA = tag_types.RUN_METADATA + +DEFAULT_SIZE_GUIDANCE = { + TENSORS: 500, +} + +STORE_EVERYTHING_SIZE_GUIDANCE = { + TENSORS: 0, +} + +_TENSOR_RESERVOIR_KEY = "." # arbitrary + + +@dataclasses.dataclass(frozen=True) +class TensorEvent: + """A tensor event. + + Attributes: + wall_time: Timestamp of the event in seconds. + step: Global step of the event. + tensor_proto: A `TensorProto`. + """ + + wall_time: float + step: int + tensor_proto: tensor_pb2.TensorProto + + +class EventAccumulator: + """An `EventAccumulator` takes an event generator, and accumulates the + values. + + The `EventAccumulator` is intended to provide a convenient Python + interface for loading Event data written during a TensorFlow run. + TensorFlow writes out `Event` protobuf objects, which have a timestamp + and step number, and often contain a `Summary`. Summaries can have + different kinds of data stored as arbitrary tensors. The Summaries + also have a tag, which we use to organize logically related data. The + `EventAccumulator` supports retrieving the `Event` and `Summary` data + by its tag. + + Calling `Tags()` gets a map from `tagType` (i.e., `tensors`) to the + associated tags for those data types. Then, the functional endpoint + (i.g., `Accumulator.Tensors(tag)`) allows for the retrieval of all + data associated with that tag. + + The `Reload()` method synchronously loads all of the data written so far. + + Fields: + most_recent_step: Step of last Event proto added. This should only + be accessed from the thread that calls Reload. This is -1 if + nothing has been loaded yet. + most_recent_wall_time: Timestamp of last Event proto added. This is + a float containing seconds from the UNIX epoch, or -1 if + nothing has been loaded yet. This should only be accessed from + the thread that calls Reload. + path: A file path to a directory containing tf events files, or a single + tf events file. The accumulator will load events from this path. + tensors_by_tag: A dictionary mapping each tag name to a + reservoir.Reservoir of tensor summaries. Each such reservoir will + only use a single key, given by `_TENSOR_RESERVOIR_KEY`. 
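+
+    A minimal usage sketch (the run path and the "loss" tag below are
+    hypothetical):
+
+    ```python
+    from tensorboard.backend.event_processing import plugin_event_accumulator
+
+    acc = plugin_event_accumulator.EventAccumulator("path/to/run")
+    acc.Reload()
+    print(acc.Tags())  # e.g. {'tensors': ['loss'], 'graph': False, ...}
+    for tensor_event in acc.Tensors("loss"):
+        print(tensor_event.step, tensor_event.wall_time)
+    ```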
+ + @@Tensors + """ + + def __init__( + self, + path, + size_guidance=None, + tensor_size_guidance=None, + purge_orphaned_data=True, + event_file_active_filter=None, + detect_file_replacement=None, + ): + """Construct the `EventAccumulator`. + + Args: + path: A file path to a directory containing tf events files, or a single + tf events file. The accumulator will load events from this path. + size_guidance: Information on how much data the EventAccumulator should + store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much + so as to avoid OOMing the client. The size_guidance should be a map + from a `tagType` string to an integer representing the number of + items to keep per tag for items of that `tagType`. If the size is 0, + all events are stored. + tensor_size_guidance: Like `size_guidance`, but allowing finer + granularity for tensor summaries. Should be a map from the + `plugin_name` field on the `PluginData` proto to an integer + representing the number of items to keep per tag. Plugins for + which there is no entry in this map will default to the value of + `size_guidance[event_accumulator.TENSORS]`. Defaults to `{}`. + purge_orphaned_data: Whether to discard any events that were "orphaned" by + a TensorFlow restart. + event_file_active_filter: Optional predicate for determining whether an + event file latest load timestamp should be considered active. If passed, + this will enable multifile directory loading. + detect_file_replacement: Optional boolean; if True, event file loading + will try to detect when a file has been replaced with a new version + that contains additional data, by monitoring the file size. + """ + size_guidance = dict(size_guidance or DEFAULT_SIZE_GUIDANCE) + sizes = {} + for key in DEFAULT_SIZE_GUIDANCE: + if key in size_guidance: + sizes[key] = size_guidance[key] + else: + sizes[key] = DEFAULT_SIZE_GUIDANCE[key] + self._size_guidance = size_guidance + self._tensor_size_guidance = dict(tensor_size_guidance or {}) + + self._first_event_timestamp = None + + self._graph = None + self._graph_from_metagraph = False + self._meta_graph = None + self._tagged_metadata = {} + self.summary_metadata = {} + self.tensors_by_tag = {} + self._tensors_by_tag_lock = threading.Lock() + + # Keep a mapping from plugin name to a dict mapping from tag to plugin data + # content obtained from the SummaryMetadata (metadata field of Value) for + # that plugin (This is not the entire SummaryMetadata proto - only the + # content for that plugin). The SummaryWriter only keeps the content on the + # first event encountered per tag, so we must store that first instance of + # content for each tag. + self._plugin_to_tag_to_content = collections.defaultdict(dict) + # Locks the dict `_plugin_to_tag_to_content` as well as the + # dicts `_plugin_to_tag_to_content[p]` for each `p`. + self._plugin_tag_lock = threading.Lock() + + self.path = path + self._generator = _GeneratorFromPath( + path, event_file_active_filter, detect_file_replacement + ) + self._generator_mutex = threading.Lock() + + self.purge_orphaned_data = purge_orphaned_data + self._seen_session_start = False + + self.most_recent_step = -1 + self.most_recent_wall_time = -1 + self.file_version = None + + # Name of the source writer that writes the event. + self._source_writer = None + + def Reload(self): + """Loads all events added since the last call to `Reload`. + + If `Reload` was never called, loads all events in the file. + + Returns: + The `EventAccumulator`. 
+ """ + with self._generator_mutex: + for event in self._generator.Load(): + self._ProcessEvent(event) + return self + + def PluginAssets(self, plugin_name): + """Return a list of all plugin assets for the given plugin. + + Args: + plugin_name: The string name of a plugin to retrieve assets for. + + Returns: + A list of string plugin asset names, or empty list if none are available. + If the plugin was not registered, an empty list is returned. + """ + return plugin_asset_util.ListAssets(self.path, plugin_name) + + def RetrievePluginAsset(self, plugin_name, asset_name): + """Return the contents of a given plugin asset. + + Args: + plugin_name: The string name of a plugin. + asset_name: The string name of an asset. + + Returns: + The string contents of the plugin asset. + + Raises: + KeyError: If the asset is not available. + """ + return plugin_asset_util.RetrieveAsset( + self.path, plugin_name, asset_name + ) + + def FirstEventTimestamp(self): + """Returns the timestamp in seconds of the first event. + + If the first event has been loaded (either by this method or by `Reload`, + this returns immediately. Otherwise, it will load in the first event. Note + that this means that calling `Reload` will cause this to block until + `Reload` has finished. + + Returns: + The timestamp in seconds of the first event that was loaded. + + Raises: + ValueError: If no events have been loaded and there were no events found + on disk. + """ + if self._first_event_timestamp is not None: + return self._first_event_timestamp + with self._generator_mutex: + try: + event = next(self._generator.Load()) + self._ProcessEvent(event) + return self._first_event_timestamp + + except StopIteration: + raise ValueError("No event timestamp could be found") + + def GetSourceWriter(self) -> Optional[str]: + """Returns the name of the event writer.""" + if self._source_writer is not None: + return self._source_writer + with self._generator_mutex: + try: + event = next(self._generator.Load()) + self._ProcessEvent(event) + return self._source_writer + except StopIteration: + logger.info( + "End of file in %s, no source writer was found.", self.path + ) + + def PluginTagToContent(self, plugin_name): + """Returns a dict mapping tags to content specific to that plugin. + + Args: + plugin_name: The name of the plugin for which to fetch plugin-specific + content. + + Raises: + KeyError: if the plugin name is not found. + + Returns: + A dict mapping tag names to bytestrings of plugin-specific content-- by + convention, in the form of binary serialized protos. + """ + with self._plugin_tag_lock: + if plugin_name not in self._plugin_to_tag_to_content: + raise KeyError("Plugin %r could not be found." % plugin_name) + # Return a snapshot to avoid concurrent mutation and iteration issues. + return dict(self._plugin_to_tag_to_content[plugin_name]) + + def ActivePlugins(self): + """Return a set of plugins with summary data. + + Returns: + The distinct union of `plugin_data.plugin_name` fields from + all the `SummaryMetadata` protos stored in this accumulator. + """ + with self._plugin_tag_lock: + return frozenset(self._plugin_to_tag_to_content) + + def SummaryMetadata(self, tag): + """Given a summary tag name, return the associated metadata object. + + Args: + tag: The name of a tag, as a string. + + Raises: + KeyError: If the tag is not found. + + Returns: + A `SummaryMetadata` protobuf. + """ + return self.summary_metadata[tag] + + def AllSummaryMetadata(self): + """Return summary metadata for all tags. 
+ + Returns: + A dict `d` such that `d[tag]` is a `SummaryMetadata` proto for + the keyed tag. + """ + return dict(self.summary_metadata) + + def _ProcessEvent(self, event): + """Called whenever an event is loaded.""" + if self._first_event_timestamp is None: + self._first_event_timestamp = event.wall_time + + if event.HasField("source_metadata"): + new_source_writer = event_util.GetSourceWriter( + event.source_metadata + ) + if self._source_writer and self._source_writer != new_source_writer: + logger.info( + ( + "Found new source writer for event.proto. " + "Old: {0}, New: {1}" + ).format(self._source_writer, new_source_writer) + ) + self._source_writer = new_source_writer + + if event.HasField("file_version"): + new_file_version = event_util.ParseFileVersion(event.file_version) + if self.file_version and self.file_version != new_file_version: + ## This should not happen. + logger.warning( + ( + "Found new file_version for event.proto. This will " + "affect purging logic for TensorFlow restarts. " + "Old: {0} New: {1}" + ).format(self.file_version, new_file_version) + ) + self.file_version = new_file_version + + self._MaybePurgeOrphanedData(event) + + ## Process the event. + # GraphDef and MetaGraphDef are handled in a special way: + # If no graph_def Event is available, but a meta_graph_def is, and it + # contains a graph_def, then use the meta_graph_def.graph_def as our graph. + # If a graph_def Event is available, always prefer it to the graph_def + # inside the meta_graph_def. + if event.HasField("graph_def"): + if self._graph is not None: + logger.warning( + ( + "Found more than one graph event per run, or there was " + "a metagraph containing a graph_def, as well as one or " + "more graph events. Overwriting the graph with the " + "newest event." + ) + ) + self._graph = event.graph_def + self._graph_from_metagraph = False + elif event.HasField("meta_graph_def"): + if self._meta_graph is not None: + logger.warning( + ( + "Found more than one metagraph event per run. " + "Overwriting the metagraph with the newest event." + ) + ) + self._meta_graph = event.meta_graph_def + if self._graph is None or self._graph_from_metagraph: + # We may have a graph_def in the metagraph. If so, and no + # graph_def is directly available, use this one instead. + meta_graph = meta_graph_pb2.MetaGraphDef() + meta_graph.ParseFromString(self._meta_graph) + if meta_graph.graph_def: + if self._graph is not None: + logger.warning( + ( + "Found multiple metagraphs containing graph_defs," + "but did not find any graph events. Overwriting the " + "graph with the newest metagraph version." + ) + ) + self._graph_from_metagraph = True + self._graph = meta_graph.graph_def.SerializeToString() + elif event.HasField("tagged_run_metadata"): + tag = event.tagged_run_metadata.tag + if tag in self._tagged_metadata: + logger.warning( + 'Found more than one "run metadata" event with tag ' + + tag + + ". Overwriting it with the newest event." + ) + self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata + elif event.HasField("summary"): + for value in event.summary.value: + if value.HasField("metadata"): + tag = value.tag + # We only store the first instance of the metadata. This check + # is important: the `FileWriter` does strip metadata from all + # values except the first one per each tag, but a new + # `FileWriter` is created every time a training job stops and + # restarts. Hence, we must also ignore non-initial metadata in + # this logic. 
+ if tag not in self.summary_metadata: + self.summary_metadata[tag] = value.metadata + plugin_data = value.metadata.plugin_data + if plugin_data.plugin_name: + with self._plugin_tag_lock: + self._plugin_to_tag_to_content[ + plugin_data.plugin_name + ][tag] = plugin_data.content + else: + logger.warning( + ( + "This summary with tag %r is oddly not associated with a " + "plugin." + ), + tag, + ) + + if value.HasField("tensor"): + datum = value.tensor + tag = value.tag + if not tag: + # This tensor summary was created using the old method that used + # plugin assets. We must still continue to support it. + tag = value.node_name + self._ProcessTensor(tag, event.wall_time, event.step, datum) + + def Tags(self): + """Return all tags found in the value stream. + + Returns: + A `{tagType: ['list', 'of', 'tags']}` dictionary. + """ + return { + TENSORS: list(self.tensors_by_tag.keys()), + # Use a heuristic: if the metagraph is available, but + # graph is not, then we assume the metagraph contains the graph. + GRAPH: self._graph is not None, + META_GRAPH: self._meta_graph is not None, + RUN_METADATA: list(self._tagged_metadata.keys()), + } + + def Graph(self): + """Return the graph definition, if there is one. + + If the graph is stored directly, return that. If no graph is stored + directly but a metagraph is stored containing a graph, return that. + + Raises: + ValueError: If there is no graph for this run. + + Returns: + The `graph_def` proto. + """ + graph = graph_pb2.GraphDef() + if self._graph is not None: + graph.ParseFromString(self._graph) + return graph + raise ValueError("There is no graph in this EventAccumulator") + + def SerializedGraph(self): + """Return the graph definition in serialized form, if there is one.""" + return self._graph + + def MetaGraph(self): + """Return the metagraph definition, if there is one. + + Raises: + ValueError: If there is no metagraph for this run. + + Returns: + The `meta_graph_def` proto. + """ + if self._meta_graph is None: + raise ValueError("There is no metagraph in this EventAccumulator") + meta_graph = meta_graph_pb2.MetaGraphDef() + meta_graph.ParseFromString(self._meta_graph) + return meta_graph + + def RunMetadata(self, tag): + """Given a tag, return the associated session.run() metadata. + + Args: + tag: A string tag associated with the event. + + Raises: + ValueError: If the tag is not found. + + Returns: + The metadata in form of `RunMetadata` proto. + """ + if tag not in self._tagged_metadata: + raise ValueError("There is no run metadata with this tag name") + + run_metadata = config_pb2.RunMetadata() + run_metadata.ParseFromString(self._tagged_metadata[tag]) + return run_metadata + + def Tensors(self, tag): + """Given a summary tag, return all associated tensors. + + Args: + tag: A string tag associated with the events. + + Raises: + KeyError: If the tag is not found. + + Returns: + An array of `TensorEvent`s. + """ + return self.tensors_by_tag[tag].Items(_TENSOR_RESERVOIR_KEY) + + def _MaybePurgeOrphanedData(self, event): + """Maybe purge orphaned data due to a TensorFlow crash. + + When TensorFlow crashes at step T+O and restarts at step T, any events + written after step T are now "orphaned" and will be at best misleading if + they are included in TensorBoard. + + This logic attempts to determine if there is orphaned data, and purge it + if it is found. + + Args: + event: The event to use as a reference, to determine if a purge is needed. 
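+
+        If a purge is triggered, expired `TensorEvent`s are dropped from the
+        affected reservoirs and a warning describing the purge is logged.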
+ """ + if not self.purge_orphaned_data: + return + ## Check if the event happened after a crash, and purge expired tags. + if self.file_version and self.file_version >= 2: + ## If the file_version is recent enough, use the SessionLog enum + ## to check for restarts. + self._CheckForRestartAndMaybePurge(event) + else: + ## If there is no file version, default to old logic of checking for + ## out of order steps. + self._CheckForOutOfOrderStepAndMaybePurge(event) + # After checking, update the most recent summary step and wall time. + if event.HasField("summary"): + self.most_recent_step = event.step + self.most_recent_wall_time = event.wall_time + + def _CheckForRestartAndMaybePurge(self, event): + """Check and discard expired events using SessionLog.START. + + The first SessionLog.START event in a run indicates the start of a + supervisor session. Subsequent SessionLog.START events indicate a + *restart*, which may need to preempt old events. This method checks + for a session restart event and purges all previously seen events whose + step is larger than or equal to this event's step. + + Because of supervisor threading, it is possible that this logic will + cause the first few event messages to be discarded since supervisor + threading does not guarantee that the START message is deterministically + written first. + + This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which + can inadvertently discard events due to supervisor threading. + + Args: + event: The event to use as reference. If the event is a START event, all + previously seen events with a greater event.step will be purged. + """ + if event.session_log.status != event_pb2.SessionLog.START: + return + if not self._seen_session_start: + # Initial start event: does not indicate a restart. + self._seen_session_start = True + return + self._Purge(event, by_tags=False) + + def _CheckForOutOfOrderStepAndMaybePurge(self, event): + """Check for out-of-order event.step and discard expired events for + tags. + + Check if the event is out of order relative to the global most recent step. + If it is, purge outdated summaries for tags that the event contains. + + Args: + event: The event to use as reference. If the event is out-of-order, all + events with the same tags, but with a greater event.step will be purged. + """ + if event.step < self.most_recent_step and event.HasField("summary"): + self._Purge(event, by_tags=True) + + def _ProcessTensor(self, tag, wall_time, step, tensor): + tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor) + with self._tensors_by_tag_lock: + if tag not in self.tensors_by_tag: + reservoir_size = self._GetTensorReservoirSize(tag) + self.tensors_by_tag[tag] = reservoir.Reservoir(reservoir_size) + self.tensors_by_tag[tag].AddItem(_TENSOR_RESERVOIR_KEY, tv) + + def _GetTensorReservoirSize(self, tag): + default = self._size_guidance[TENSORS] + summary_metadata = self.summary_metadata.get(tag) + if summary_metadata is None: + return default + return self._tensor_size_guidance.get( + summary_metadata.plugin_data.plugin_name, default + ) + + def _Purge(self, event, by_tags): + """Purge all events that have occurred after the given event.step. + + If by_tags is True, purge all events that occurred after the given + event.step, but only for the tags that the event has. Non-sequential + event.steps suggest that a TensorFlow restart occurred, and we discard + the out-of-order events to display a consistent view in TensorBoard. 
+ + Discarding by tags is the safer method, when we are unsure whether a restart + has occurred, given that threading in supervisor can cause events of + different tags to arrive with unsynchronized step values. + + If by_tags is False, then purge all events with event.step greater than the + given event.step. This can be used when we are certain that a TensorFlow + restart has occurred and these events can be discarded. + + Args: + event: The event to use as reference for the purge. All events with + the same tags, but with a greater event.step will be purged. + by_tags: Bool to dictate whether to discard all out-of-order events or + only those that are associated with the given reference event. + """ + ## Keep data in reservoirs that has a step less than event.step + _NotExpired = lambda x: x.step < event.step + + num_expired = 0 + if by_tags: + for value in event.summary.value: + if value.tag in self.tensors_by_tag: + tag_reservoir = self.tensors_by_tag[value.tag] + num_expired += tag_reservoir.FilterItems( + _NotExpired, _TENSOR_RESERVOIR_KEY + ) + else: + for tag_reservoir in self.tensors_by_tag.values(): + num_expired += tag_reservoir.FilterItems( + _NotExpired, _TENSOR_RESERVOIR_KEY + ) + if num_expired > 0: + purge_msg = _GetPurgeMessage( + self.most_recent_step, + self.most_recent_wall_time, + event.step, + event.wall_time, + num_expired, + ) + logger.warning(purge_msg) + + +def _GetPurgeMessage( + most_recent_step, + most_recent_wall_time, + event_step, + event_wall_time, + num_expired, +): + """Return the string message associated with TensorBoard purges.""" + return ( + "Detected out of order event.step likely caused by a TensorFlow " + "restart. Purging {} expired tensor events from Tensorboard display " + "between the previous step: {} (timestamp: {}) and current step: {} " + "(timestamp: {})." + ).format( + num_expired, + most_recent_step, + most_recent_wall_time, + event_step, + event_wall_time, + ) + + +def _GeneratorFromPath( + path, event_file_active_filter=None, detect_file_replacement=None +): + """Create an event generator for file or directory at given path string.""" + if not path: + raise ValueError("path must be a valid string") + if io_wrapper.IsSummaryEventsFile(path): + return event_file_loader.EventFileLoader(path, detect_file_replacement) + elif event_file_active_filter: + loader_factory = ( + lambda path: event_file_loader.TimestampedEventFileLoader( + path, detect_file_replacement + ) + ) + return directory_loader.DirectoryLoader( + path, + loader_factory, + path_filter=io_wrapper.IsSummaryEventsFile, + active_filter=event_file_active_filter, + ) + else: + loader_factory = lambda path: event_file_loader.EventFileLoader( + path, detect_file_replacement + ) + return directory_watcher.DirectoryWatcher( + path, + loader_factory, + io_wrapper.IsSummaryEventsFile, + ) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/json_util.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/json_util.py new file mode 100644 index 0000000000000000000000000000000000000000..76577bcebc1ed95cbf3f914adfa74a4c369aecb8 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/json_util.py @@ -0,0 +1,72 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # ============================================================================== + + """A module providing a function for serializing JSON values with Infinity. + + Python provides no way to override how json.dumps serializes + Infinity/-Infinity/NaN; if allow_nan is true, it encodes them as + Infinity/-Infinity/NaN, in violation of the JSON spec and in violation + of what JSON.parse accepts. If it's false, it throws a ValueError. + Neither subclassing JSONEncoder nor passing a function in the |default| + keyword argument overrides this. + """ + + + import collections + import math + + + _INFINITY = float("inf") + _NEGATIVE_INFINITY = float("-inf") + + + def Cleanse(obj, encoding="utf-8"): + """Makes a Python object appropriate for JSON serialization. + + - Replaces instances of Infinity/-Infinity/NaN with strings. + - Turns byte strings into unicode strings. + - Turns sets into sorted lists. + - Turns tuples into lists. + + Args: + obj: Python data structure. + encoding: Charset used to decode byte strings. + + Returns: + Unicode JSON data structure. + """ + if isinstance(obj, int): + return obj + elif isinstance(obj, float): + if obj == _INFINITY: + return "Infinity" + elif obj == _NEGATIVE_INFINITY: + return "-Infinity" + elif math.isnan(obj): + return "NaN" + else: + return obj + elif isinstance(obj, bytes): + return obj.decode(encoding) + elif isinstance(obj, (list, tuple)): + return [Cleanse(i, encoding) for i in obj] + elif isinstance(obj, set): + return [Cleanse(i, encoding) for i in sorted(obj)] + elif isinstance(obj, dict): + return collections.OrderedDict( + (Cleanse(k, encoding), Cleanse(v, encoding)) for k, v in obj.items() + ) + else: + return obj diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/path_prefix.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/path_prefix.py new file mode 100644 index 0000000000000000000000000000000000000000..0aae07bfc29d7d8f41a8866276ce4aaeb903b15b --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/path_prefix.py @@ -0,0 +1,68 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Internal path prefix support for TensorBoard. + +Using a path prefix of `/foo/bar` enables TensorBoard to serve from +`http://localhost:6006/foo/bar/` rather than `http://localhost:6006/`. +See the `--path_prefix` flag docs for more details. +""" + + +from tensorboard import errors + + +class PathPrefixMiddleware: + """WSGI middleware for path prefixes. 
+ + All requests to this middleware must begin with the specified path + prefix (otherwise, a 404 will be returned immediately). Requests + will be forwarded to the underlying application with the path prefix + stripped and appended to `SCRIPT_NAME` (see the WSGI spec, PEP 3333, + for details). + """ + + def __init__(self, application, path_prefix): + """Initializes this middleware. + + Args: + application: The WSGI application to wrap (see PEP 3333). + path_prefix: A string path prefix to be stripped from incoming + requests. If empty, this middleware is a no-op. If non-empty, + the path prefix must start with a slash and not end with one + (e.g., "/tensorboard"). + """ + if path_prefix.endswith("/"): + raise ValueError( + "Path prefix must not end with slash: %r" % path_prefix + ) + if path_prefix and not path_prefix.startswith("/"): + raise ValueError( + "Non-empty path prefix must start with slash: %r" % path_prefix + ) + self._application = application + self._path_prefix = path_prefix + self._strict_prefix = self._path_prefix + "/" + + def __call__(self, environ, start_response): + path = environ.get("PATH_INFO", "") + if path != self._path_prefix and not path.startswith( + self._strict_prefix + ): + raise errors.NotFoundError() + environ["PATH_INFO"] = path[len(self._path_prefix) :] + environ["SCRIPT_NAME"] = ( + environ.get("SCRIPT_NAME", "") + self._path_prefix + ) + return self._application(environ, start_response) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/__init__.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17f59dc1ef0b9dda97a0be6e5f44ae6eed239be2 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/allocation_description_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/allocation_description_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a3a6f1763d39f43ee4ffa996a6e4a2013caf810 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/allocation_description_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/api_def_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/api_def_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03235e3bd79183f7a8a2571f1b57d1415aedb29c Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/api_def_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/attr_value_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/attr_value_pb2.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5fc8340f15c2cd4e151d3810f0f124b6a223723f Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/attr_value_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cluster_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cluster_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61306dd308a352f0adcdbb0c6d76cfeb01948470 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cluster_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/config_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/config_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ad779c15377c8c156a3390abfa5731f37f85710 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/config_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/coordination_config_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/coordination_config_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e937b138d3093b237b19188c9f54bbe0f5914a5b Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/coordination_config_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cost_graph_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cost_graph_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c702114e71f294d2075dd9f41faa858aa543086 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cost_graph_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cpp_shape_inference_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cpp_shape_inference_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ff54c672153faa490b6f561c9f02d696682ae66 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cpp_shape_inference_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/debug_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/debug_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6aa96f2e64823fa54d8dca51376502cd7959348 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/debug_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/event_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/event_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfccfeaa0a1b34f5bfcb1f809fc94807e80b4545 Binary files /dev/null and 
b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/event_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/full_type_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/full_type_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0321539dcbdb8b0d16125ace167bc83d7a74cb75 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/full_type_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/function_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/function_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d54bd150f5f9649ce795b473c7e6c0eb222bb479 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/function_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_debug_info_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_debug_info_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0a920e0251e1de61dee9439bcf428c40efccf79 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_debug_info_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f590121abef2d7285da86c437ad609beff49733d Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/histogram_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/histogram_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa8a3404b5f8bda77f46a8589998ac194e904044 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/histogram_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/meta_graph_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/meta_graph_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..575e904b722fcffea4a1d0c60556c3a7da64b54d Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/meta_graph_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/node_def_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/node_def_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdc485a078c0cec20a585331983ffe5328a6176b Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/node_def_pb2.cpython-310.pyc differ diff --git 
a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/op_def_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/op_def_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f0694df22824a8e3de50d1d174299178538c6aa Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/op_def_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/resource_handle_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/resource_handle_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8883d9d5009f813f8da4c600499fc2130c6e79c Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/resource_handle_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rewriter_config_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rewriter_config_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c76a92465a40a0d743a7ccd4e739fa6b5610ee9 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rewriter_config_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rpc_options_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rpc_options_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a87ef65894048a3c58b4ecb5ee6da6f8d0826495 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rpc_options_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saved_object_graph_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saved_object_graph_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5d7a35016f3d2d03a52859c95a4f0dc9bfb1399 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saved_object_graph_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saver_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saver_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddfe87cf2b118c5d2a8ee3687bd134b18f75c1ac Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saver_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/step_stats_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/step_stats_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65bbda1e2616ec566aebd0743f24cefa15c8f2ce Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/step_stats_pb2.cpython-310.pyc differ diff --git 
a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/struct_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/struct_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..984e7134ddbc1f7d8b5e41c682f5a1dfc38f0c20 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/struct_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/summary_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/summary_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c772741da5d38d6badd129d15d6339c98812de22 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/summary_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_description_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_description_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..540b2911cbf37c786200457e4bd5395552d4d10a Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_description_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..098bff8b5e1d156ba7eb12b0390f66e45ae2ac29 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_shape_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_shape_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85213b36aa632ed4104aa02333ddb3ed8ff71ca0 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tensor_shape_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tfprof_log_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tfprof_log_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..579dee779c48985b2e619bab6c7d39cc7d85f6bb Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/tfprof_log_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/trackable_object_graph_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/trackable_object_graph_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d6a82976ea5c00e4f44978a2009b028c3d22bd6 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/trackable_object_graph_pb2.cpython-310.pyc differ diff --git 
a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/types_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/types_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0b3058de90af83bb67ab3228f4e8923360f0c07 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/types_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/variable_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/variable_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c1014b35d672365eccd1b9a922b0b27c3b1b5bf Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/variable_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/verifier_config_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/verifier_config_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e453f608b088d29da6422e43c5bec205f5d428e6 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/verifier_config_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/versions_pb2.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/versions_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e69e5e0e411c779183fc8ba23abf073f6715699 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/versions_pb2.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/allocation_description_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/allocation_description_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..cf261f4a4865c8911a4e39a20b54c4d6a3e17403 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/allocation_description_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/allocation_description.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5tensorboard/compat/proto/allocation_description.proto\x12\x0btensorboard\"\xa3\x01\n\x15\x41llocationDescription\x12\x17\n\x0frequested_bytes\x18\x01 \x01(\x03\x12\x17\n\x0f\x61llocated_bytes\x18\x02 \x01(\x03\x12\x16\n\x0e\x61llocator_name\x18\x03 \x01(\t\x12\x15\n\rallocation_id\x18\x04 \x01(\x03\x12\x1c\n\x14has_single_reference\x18\x05 \x01(\x08\x12\x0b\n\x03ptr\x18\x06 \x01(\x04\x42\x9b\x01\n\x18org.tensorflow.frameworkB\x1b\x41llocationDescriptionProtosP\x01Z]github.com/tensorflow/tensorflow/tensorflow/go/core/framework/allocation_description_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_ALLOCATIONDESCRIPTION = DESCRIPTOR.message_types_by_name['AllocationDescription'] +AllocationDescription = _reflection.GeneratedProtocolMessageType('AllocationDescription', (_message.Message,), { + 'DESCRIPTOR' : _ALLOCATIONDESCRIPTION, + '__module__' : 'tensorboard.compat.proto.allocation_description_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.AllocationDescription) + }) +_sym_db.RegisterMessage(AllocationDescription) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\033AllocationDescriptionProtosP\001Z]github.com/tensorflow/tensorflow/tensorflow/go/core/framework/allocation_description_go_proto\370\001\001' + _ALLOCATIONDESCRIPTION._serialized_start=71 + _ALLOCATIONDESCRIPTION._serialized_end=234 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/api_def_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/api_def_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..2f438e7d07179ffb323fcd7ee6da45e315db4471 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/api_def_pb2.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/api_def.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import attr_value_pb2 as tensorboard_dot_compat_dot_proto_dot_attr__value__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&tensorboard/compat/proto/api_def.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/attr_value.proto\"\xe7\x05\n\x06\x41piDef\x12\x15\n\rgraph_op_name\x18\x01 \x01(\t\x12\x1b\n\x13\x64\x65precation_message\x18\x0c \x01(\t\x12\x1b\n\x13\x64\x65precation_version\x18\r \x01(\x05\x12\x32\n\nvisibility\x18\x02 \x01(\x0e\x32\x1e.tensorboard.ApiDef.Visibility\x12.\n\x08\x65ndpoint\x18\x03 \x03(\x0b\x32\x1c.tensorboard.ApiDef.Endpoint\x12\'\n\x06in_arg\x18\x04 \x03(\x0b\x32\x17.tensorboard.ApiDef.Arg\x12(\n\x07out_arg\x18\x05 \x03(\x0b\x32\x17.tensorboard.ApiDef.Arg\x12\x11\n\targ_order\x18\x0b \x03(\t\x12&\n\x04\x61ttr\x18\x06 \x03(\x0b\x32\x18.tensorboard.ApiDef.Attr\x12\x0f\n\x07summary\x18\x07 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x08 \x01(\t\x12\x1a\n\x12\x64\x65scription_prefix\x18\t \x01(\t\x12\x1a\n\x12\x64\x65scription_suffix\x18\n \x01(\t\x1aI\n\x08\x45ndpoint\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ndeprecated\x18\x03 \x01(\x08\x12\x1b\n\x13\x64\x65precation_version\x18\x04 \x01(\x05\x1a;\n\x03\x41rg\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\trename_to\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x1ak\n\x04\x41ttr\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\trename_to\x18\x02 \x01(\t\x12-\n\rdefault_value\x18\x03 \x01(\x0b\x32\x16.tensorboard.AttrValue\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"G\n\nVisibility\x12\x16\n\x12\x44\x45\x46\x41ULT_VISIBILITY\x10\x00\x12\x0b\n\x07VISIBLE\x10\x01\x12\x08\n\x04SKIP\x10\x02\x12\n\n\x06HIDDEN\x10\x03\"*\n\x07\x41piDefs\x12\x1f\n\x02op\x18\x01 \x03(\x0b\x32\x13.tensorboard.ApiDefB}\n\x18org.tensorflow.frameworkB\x0c\x41piDefProtosP\x01ZNgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_APIDEF = DESCRIPTOR.message_types_by_name['ApiDef'] +_APIDEF_ENDPOINT = _APIDEF.nested_types_by_name['Endpoint'] +_APIDEF_ARG = _APIDEF.nested_types_by_name['Arg'] +_APIDEF_ATTR = _APIDEF.nested_types_by_name['Attr'] +_APIDEFS = DESCRIPTOR.message_types_by_name['ApiDefs'] +_APIDEF_VISIBILITY = _APIDEF.enum_types_by_name['Visibility'] +ApiDef = _reflection.GeneratedProtocolMessageType('ApiDef', (_message.Message,), { + + 'Endpoint' : _reflection.GeneratedProtocolMessageType('Endpoint', (_message.Message,), { + 'DESCRIPTOR' : _APIDEF_ENDPOINT, + '__module__' : 'tensorboard.compat.proto.api_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ApiDef.Endpoint) + }) + , + + 'Arg' : _reflection.GeneratedProtocolMessageType('Arg', (_message.Message,), { + 'DESCRIPTOR' : _APIDEF_ARG, + '__module__' : 'tensorboard.compat.proto.api_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ApiDef.Arg) + }) + , + + 'Attr' : _reflection.GeneratedProtocolMessageType('Attr', (_message.Message,), { + 'DESCRIPTOR' : _APIDEF_ATTR, + '__module__' : 'tensorboard.compat.proto.api_def_pb2' + # 
@@protoc_insertion_point(class_scope:tensorboard.ApiDef.Attr) + }) + , + 'DESCRIPTOR' : _APIDEF, + '__module__' : 'tensorboard.compat.proto.api_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ApiDef) + }) +_sym_db.RegisterMessage(ApiDef) +_sym_db.RegisterMessage(ApiDef.Endpoint) +_sym_db.RegisterMessage(ApiDef.Arg) +_sym_db.RegisterMessage(ApiDef.Attr) + +ApiDefs = _reflection.GeneratedProtocolMessageType('ApiDefs', (_message.Message,), { + 'DESCRIPTOR' : _APIDEFS, + '__module__' : 'tensorboard.compat.proto.api_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ApiDefs) + }) +_sym_db.RegisterMessage(ApiDefs) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\014ApiDefProtosP\001ZNgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto\370\001\001' + _APIDEF._serialized_start=99 + _APIDEF._serialized_end=842 + _APIDEF_ENDPOINT._serialized_start=526 + _APIDEF_ENDPOINT._serialized_end=599 + _APIDEF_ARG._serialized_start=601 + _APIDEF_ARG._serialized_end=660 + _APIDEF_ATTR._serialized_start=662 + _APIDEF_ATTR._serialized_end=769 + _APIDEF_VISIBILITY._serialized_start=771 + _APIDEF_VISIBILITY._serialized_end=842 + _APIDEFS._serialized_start=844 + _APIDEFS._serialized_end=886 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cluster_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cluster_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..05a13f33906e60037c2888d03746a1e3b8161af8 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cluster_pb2.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/cluster.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&tensorboard/compat/proto/cluster.proto\x12\x0btensorboard\"s\n\x06JobDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x05tasks\x18\x02 \x03(\x0b\x32\x1e.tensorboard.JobDef.TasksEntry\x1a,\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\".\n\nClusterDef\x12 \n\x03job\x18\x01 \x03(\x0b\x32\x13.tensorboard.JobDefB\x87\x01\n\x1aorg.tensorflow.distruntimeB\rClusterProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_JOBDEF = DESCRIPTOR.message_types_by_name['JobDef'] +_JOBDEF_TASKSENTRY = _JOBDEF.nested_types_by_name['TasksEntry'] +_CLUSTERDEF = DESCRIPTOR.message_types_by_name['ClusterDef'] +JobDef = _reflection.GeneratedProtocolMessageType('JobDef', (_message.Message,), { + + 'TasksEntry' : _reflection.GeneratedProtocolMessageType('TasksEntry', (_message.Message,), { + 'DESCRIPTOR' : _JOBDEF_TASKSENTRY, + '__module__' : 'tensorboard.compat.proto.cluster_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.JobDef.TasksEntry) + }) + , + 'DESCRIPTOR' : _JOBDEF, + '__module__' : 'tensorboard.compat.proto.cluster_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.JobDef) + }) +_sym_db.RegisterMessage(JobDef) +_sym_db.RegisterMessage(JobDef.TasksEntry) + +ClusterDef = _reflection.GeneratedProtocolMessageType('ClusterDef', (_message.Message,), { + 'DESCRIPTOR' : _CLUSTERDEF, + '__module__' : 'tensorboard.compat.proto.cluster_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ClusterDef) + }) +_sym_db.RegisterMessage(ClusterDef) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\032org.tensorflow.distruntimeB\rClusterProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _JOBDEF_TASKSENTRY._options = None + _JOBDEF_TASKSENTRY._serialized_options = b'8\001' + _JOBDEF._serialized_start=55 + _JOBDEF._serialized_end=170 + _JOBDEF_TASKSENTRY._serialized_start=126 + _JOBDEF_TASKSENTRY._serialized_end=170 + _CLUSTERDEF._serialized_start=172 + _CLUSTERDEF._serialized_end=218 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/config_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/config_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..f23edb7157aa4e337902cdd4c441452083ad68a1 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/config_pb2.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/config.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import cost_graph_pb2 as tensorboard_dot_compat_dot_proto_dot_cost__graph__pb2 +from tensorboard.compat.proto import graph_pb2 as tensorboard_dot_compat_dot_proto_dot_graph__pb2 +from tensorboard.compat.proto import step_stats_pb2 as tensorboard_dot_compat_dot_proto_dot_step__stats__pb2 +from tensorboard.compat.proto import cluster_pb2 as tensorboard_dot_compat_dot_proto_dot_cluster__pb2 +from tensorboard.compat.proto import debug_pb2 as tensorboard_dot_compat_dot_proto_dot_debug__pb2 +from tensorboard.compat.proto import rewriter_config_pb2 as tensorboard_dot_compat_dot_proto_dot_rewriter__config__pb2 +from tensorboard.compat.proto import rpc_options_pb2 as tensorboard_dot_compat_dot_proto_dot_rpc__options__pb2 +from tensorboard.compat.proto import coordination_config_pb2 as tensorboard_dot_compat_dot_proto_dot_coordination__config__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%tensorboard/compat/proto/config.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/cost_graph.proto\x1a$tensorboard/compat/proto/graph.proto\x1a)tensorboard/compat/proto/step_stats.proto\x1a&tensorboard/compat/proto/cluster.proto\x1a$tensorboard/compat/proto/debug.proto\x1a.tensorboard/compat/proto/rewriter_config.proto\x1a*tensorboard/compat/proto/rpc_options.proto\x1a\x32tensorboard/compat/proto/coordination_config.proto\"\x8c\n\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\x12\"\n\x1apolling_active_delay_usecs\x18\x06 \x01(\x05\x12$\n\x1cpolling_inactive_delay_msecs\x18\x07 \x01(\x05\x12\x1c\n\x14\x66orce_gpu_compatible\x18\x08 \x01(\x08\x12:\n\x0c\x65xperimental\x18\t \x01(\x0b\x32$.tensorboard.GPUOptions.Experimental\x1a\xc4\x07\n\x0c\x45xperimental\x12L\n\x0fvirtual_devices\x18\x01 \x03(\x0b\x32\x33.tensorboard.GPUOptions.Experimental.VirtualDevices\x12#\n\x1bnum_virtual_devices_per_gpu\x18\x0f \x01(\x05\x12\x1a\n\x12use_unified_memory\x18\x02 \x01(\x08\x12#\n\x1bnum_dev_to_dev_copy_streams\x18\x03 \x01(\x05\x12\x1d\n\x15\x63ollective_ring_order\x18\x04 \x01(\t\x12\x1d\n\x15timestamped_allocator\x18\x05 \x01(\x08\x12#\n\x1bkernel_tracker_max_interval\x18\x07 \x01(\x05\x12 \n\x18kernel_tracker_max_bytes\x18\x08 \x01(\x05\x12\"\n\x1akernel_tracker_max_pending\x18\t \x01(\x05\x12\'\n\x1finternal_fragmentation_fraction\x18\n \x01(\x01\x12\x1d\n\x15use_cuda_malloc_async\x18\x0b \x01(\x08\x12,\n$disallow_retry_on_allocation_failure\x18\x0c \x01(\x08\x12 \n\x18gpu_host_mem_limit_in_mb\x18\r \x01(\x02\x12$\n\x1cgpu_host_mem_disallow_growth\x18\x0e \x01(\x08\x12$\n\x1cgpu_system_memory_size_in_mb\x18\x10 \x01(\x05\x12.\n&populate_pjrt_gpu_client_creation_info\x18\x11 \x01(\x08\x12\x0f\n\x07node_id\x18\x12 \x01(\x05\x12U\n\x14stream_merge_options\x18\x13 
\x01(\x0b\x32\x37.tensorboard.GPUOptions.Experimental.StreamMergeOptions\x1aS\n\x0eVirtualDevices\x12\x17\n\x0fmemory_limit_mb\x18\x01 \x03(\x02\x12\x10\n\x08priority\x18\x02 \x03(\x05\x12\x16\n\x0e\x64\x65vice_ordinal\x18\x03 \x03(\x05\x1a\x85\x01\n\x12StreamMergeOptions\x12#\n\x1bmerge_host_to_device_stream\x18\x01 \x01(\x08\x12#\n\x1bmerge_device_to_host_stream\x18\x02 \x01(\x08\x12%\n\x1dmerge_device_to_device_stream\x18\x03 \x01(\x08\"\x9f\x03\n\x10OptimizerOptions\x12+\n#do_common_subexpression_elimination\x18\x01 \x01(\x08\x12\x1b\n\x13\x64o_constant_folding\x18\x02 \x01(\x08\x12$\n\x1cmax_folded_constant_in_bytes\x18\x06 \x01(\x03\x12\x1c\n\x14\x64o_function_inlining\x18\x04 \x01(\x08\x12\x36\n\topt_level\x18\x03 \x01(\x0e\x32#.tensorboard.OptimizerOptions.Level\x12\x46\n\x10global_jit_level\x18\x05 \x01(\x0e\x32,.tensorboard.OptimizerOptions.GlobalJitLevel\x12\x16\n\x0e\x63pu_global_jit\x18\x07 \x01(\x08\" \n\x05Level\x12\x06\n\x02L1\x10\x00\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\xf0\x02\n\x0cGraphOptions\x12\x1e\n\x16\x65nable_recv_scheduling\x18\x02 \x01(\x08\x12\x38\n\x11optimizer_options\x18\x03 \x01(\x0b\x32\x1d.tensorboard.OptimizerOptions\x12\x18\n\x10\x62uild_cost_model\x18\x04 \x01(\x03\x12\x1e\n\x16\x62uild_cost_model_after\x18\t \x01(\x03\x12\x14\n\x0cinfer_shapes\x18\x05 \x01(\x08\x12\x1a\n\x12place_pruned_graph\x18\x06 \x01(\x08\x12 \n\x18\x65nable_bfloat16_sendrecv\x18\x07 \x01(\x08\x12\x15\n\rtimeline_step\x18\x08 \x01(\x05\x12\x34\n\x0frewrite_options\x18\n \x01(\x0b\x32\x1b.tensorboard.RewriterConfigJ\x04\x08\x01\x10\x02R%skip_common_subexpression_elimination\"A\n\x15ThreadPoolOptionProto\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\x12\x13\n\x0bglobal_name\x18\x02 \x01(\t\"0\n\x0fSessionMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\"\xf1\x10\n\x0b\x43onfigProto\x12?\n\x0c\x64\x65vice_count\x18\x01 \x03(\x0b\x32).tensorboard.ConfigProto.DeviceCountEntry\x12$\n\x1cintra_op_parallelism_threads\x18\x02 \x01(\x05\x12$\n\x1cinter_op_parallelism_threads\x18\x05 \x01(\x05\x12\x1f\n\x17use_per_session_threads\x18\t \x01(\x08\x12H\n\x1csession_inter_op_thread_pool\x18\x0c \x03(\x0b\x32\".tensorboard.ThreadPoolOptionProto\x12\x18\n\x10placement_period\x18\x03 \x01(\x05\x12\x16\n\x0e\x64\x65vice_filters\x18\x04 \x03(\t\x12,\n\x0bgpu_options\x18\x06 \x01(\x0b\x32\x17.tensorboard.GPUOptions\x12\x39\n\x18pluggable_device_options\x18\x12 \x01(\x0b\x32\x17.tensorboard.GPUOptions\x12\x1c\n\x14\x61llow_soft_placement\x18\x07 \x01(\x08\x12\x1c\n\x14log_device_placement\x18\x08 \x01(\x08\x12\x30\n\rgraph_options\x18\n \x01(\x0b\x32\x19.tensorboard.GraphOptions\x12\x1f\n\x17operation_timeout_in_ms\x18\x0b \x01(\x03\x12,\n\x0brpc_options\x18\r \x01(\x0b\x32\x17.tensorboard.RPCOptions\x12,\n\x0b\x63luster_def\x18\x0e \x01(\x0b\x32\x17.tensorboard.ClusterDef\x12\x1d\n\x15isolate_session_state\x18\x0f \x01(\x08\x12(\n share_cluster_devices_in_session\x18\x11 \x01(\x08\x12;\n\x0c\x65xperimental\x18\x10 \x01(\x0b\x32%.tensorboard.ConfigProto.Experimental\x1a\x32\n\x10\x44\x65viceCountEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\xa9\n\n\x0c\x45xperimental\x12\x1f\n\x17\x63ollective_group_leader\x18\x01 \x01(\t\x12\x15\n\rexecutor_type\x18\x03 \x01(\t\x12\x1a\n\x12recv_buf_max_chunk\x18\x04 
\x01(\x05\x12\x19\n\x11use_numa_affinity\x18\x05 \x01(\x08\x12\x35\n-collective_deterministic_sequential_execution\x18\x06 \x01(\x08\x12\x17\n\x0f\x63ollective_nccl\x18\x07 \x01(\x08\x12\x36\n.share_session_state_in_clusterspec_propagation\x18\x08 \x01(\x08\x12\x1f\n\x17\x64isable_thread_spinning\x18\t \x01(\x08\x12(\n share_cluster_devices_in_session\x18\n \x01(\x08\x12\x36\n\x10session_metadata\x18\x0b \x01(\x0b\x32\x1c.tensorboard.SessionMetadata\x12!\n\x19optimize_for_static_graph\x18\x0c \x01(\x08\x12\x1a\n\x12\x65nable_mlir_bridge\x18\r \x01(\x08\x12T\n\x13mlir_bridge_rollout\x18\x11 \x01(\x0e\x32\x37.tensorboard.ConfigProto.Experimental.MlirBridgeRollout\x12&\n\x1e\x65nable_mlir_graph_optimization\x18\x10 \x01(\x08\x12\'\n\x1f\x64isable_output_partition_graphs\x18\x0e \x01(\x08\x12#\n\x1bxla_fusion_autotuner_thresh\x18\x0f \x01(\x03\x12\x10\n\x08use_tfrt\x18\x12 \x01(\x08\x12\x19\n\x11\x65nable_multi_host\x18\x1b \x01(\x08\x12\x15\n\rtfrt_use_ifrt\x18 \x01(\x08\x12\x1b\n\x13\x62\x61\x63kend_server_port\x18\x1c \x01(\x05\x12\x12\n\ntarget_tpu\x18\x1d \x01(\x08\x12\x12\n\ntarget_gpu\x18\x1e \x01(\x08\x12\x1e\n\x16stream_merge_threshold\x18\x1f \x01(\x05\x12\'\n\x1f\x64isable_functional_ops_lowering\x18\x15 \x01(\x08\x12\'\n\x1fxla_prefer_single_graph_cluster\x18\x16 \x01(\x08\x12\x43\n\x13\x63oordination_config\x18\x17 \x01(\x0b\x32&.tensorboard.CoordinationServiceConfig\x12)\n!disable_optimize_for_static_graph\x18\x18 \x01(\x08\x12\x30\n(disable_eager_executor_streaming_enqueue\x18\x1a \x01(\x08\"\xde\x01\n\x11MlirBridgeRollout\x12#\n\x1fMLIR_BRIDGE_ROLLOUT_UNSPECIFIED\x10\x00\x12\x1f\n\x1bMLIR_BRIDGE_ROLLOUT_ENABLED\x10\x01\x12 \n\x1cMLIR_BRIDGE_ROLLOUT_DISABLED\x10\x02\"\x04\x08\x03\x10\x03\"\x04\x08\x04\x10\x04*%MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED*.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLEDJ\x04\x08\x02\x10\x03J\x04\x08\x13\x10\x14J\x04\x08\x14\x10\x15J\x04\x08\x19\x10\x1a\"\xe5\x04\n\nRunOptions\x12\x37\n\x0btrace_level\x18\x01 \x01(\x0e\x32\".tensorboard.RunOptions.TraceLevel\x12\x15\n\rtimeout_in_ms\x18\x02 \x01(\x03\x12\x1c\n\x14inter_op_thread_pool\x18\x03 \x01(\x05\x12\x1f\n\x17output_partition_graphs\x18\x05 \x01(\x08\x12\x30\n\rdebug_options\x18\x06 \x01(\x0b\x32\x19.tensorboard.DebugOptions\x12*\n\"report_tensor_allocations_upon_oom\x18\x07 \x01(\x08\x12:\n\x0c\x65xperimental\x18\x08 \x01(\x0b\x32$.tensorboard.RunOptions.Experimental\x1a\xd3\x01\n\x0c\x45xperimental\x12\x1c\n\x14\x63ollective_graph_key\x18\x01 \x01(\x03\x12\x1c\n\x14use_run_handler_pool\x18\x02 \x01(\x08\x12\\\n\x18run_handler_pool_options\x18\x03 \x01(\x0b\x32:.tensorboard.RunOptions.Experimental.RunHandlerPoolOptions\x1a)\n\x15RunHandlerPoolOptions\x12\x10\n\x08priority\x18\x01 \x01(\x03\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\x00\x12\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\xc6\x03\n\x0bRunMetadata\x12*\n\nstep_stats\x18\x01 \x01(\x0b\x32\x16.tensorboard.StepStats\x12-\n\ncost_graph\x18\x02 \x01(\x0b\x32\x19.tensorboard.CostGraphDef\x12/\n\x10partition_graphs\x18\x03 \x03(\x0b\x32\x15.tensorboard.GraphDef\x12@\n\x0f\x66unction_graphs\x18\x04 \x03(\x0b\x32\'.tensorboard.RunMetadata.FunctionGraphs\x12\x36\n\x10session_metadata\x18\x05 \x01(\x0b\x32\x1c.tensorboard.SessionMetadata\x1a\xb0\x01\n\x0e\x46unctionGraphs\x12/\n\x10partition_graphs\x18\x01 \x03(\x0b\x32\x15.tensorboard.GraphDef\x12\x35\n\x16pre_optimization_graph\x18\x02 \x01(\x0b\x32\x15.tensorboard.GraphDef\x12\x36\n\x17post_optimization_graph\x18\x03 
\x01(\x0b\x32\x15.tensorboard.GraphDef\":\n\x10TensorConnection\x12\x13\n\x0b\x66rom_tensor\x18\x01 \x01(\t\x12\x11\n\tto_tensor\x18\x02 \x01(\t\"\xb4\x03\n\x0f\x43\x61llableOptions\x12\x0c\n\x04\x66\x65\x65\x64\x18\x01 \x03(\t\x12\r\n\x05\x66\x65tch\x18\x02 \x03(\t\x12\x0e\n\x06target\x18\x03 \x03(\t\x12,\n\x0brun_options\x18\x04 \x01(\x0b\x32\x17.tensorboard.RunOptions\x12\x38\n\x11tensor_connection\x18\x05 \x03(\x0b\x32\x1d.tensorboard.TensorConnection\x12\x43\n\x0c\x66\x65\x65\x64_devices\x18\x06 \x03(\x0b\x32-.tensorboard.CallableOptions.FeedDevicesEntry\x12\x45\n\rfetch_devices\x18\x07 \x03(\x0b\x32..tensorboard.CallableOptions.FetchDevicesEntry\x12\x17\n\x0f\x66\x65tch_skip_sync\x18\x08 \x01(\x08\x1a\x32\n\x10\x46\x65\x65\x64\x44\x65vicesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x33\n\x11\x46\x65tchDevicesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x84\x01\n\x18org.tensorflow.frameworkB\x0c\x43onfigProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_GPUOPTIONS = DESCRIPTOR.message_types_by_name['GPUOptions'] +_GPUOPTIONS_EXPERIMENTAL = _GPUOPTIONS.nested_types_by_name['Experimental'] +_GPUOPTIONS_EXPERIMENTAL_VIRTUALDEVICES = _GPUOPTIONS_EXPERIMENTAL.nested_types_by_name['VirtualDevices'] +_GPUOPTIONS_EXPERIMENTAL_STREAMMERGEOPTIONS = _GPUOPTIONS_EXPERIMENTAL.nested_types_by_name['StreamMergeOptions'] +_OPTIMIZEROPTIONS = DESCRIPTOR.message_types_by_name['OptimizerOptions'] +_GRAPHOPTIONS = DESCRIPTOR.message_types_by_name['GraphOptions'] +_THREADPOOLOPTIONPROTO = DESCRIPTOR.message_types_by_name['ThreadPoolOptionProto'] +_SESSIONMETADATA = DESCRIPTOR.message_types_by_name['SessionMetadata'] +_CONFIGPROTO = DESCRIPTOR.message_types_by_name['ConfigProto'] +_CONFIGPROTO_DEVICECOUNTENTRY = _CONFIGPROTO.nested_types_by_name['DeviceCountEntry'] +_CONFIGPROTO_EXPERIMENTAL = _CONFIGPROTO.nested_types_by_name['Experimental'] +_RUNOPTIONS = DESCRIPTOR.message_types_by_name['RunOptions'] +_RUNOPTIONS_EXPERIMENTAL = _RUNOPTIONS.nested_types_by_name['Experimental'] +_RUNOPTIONS_EXPERIMENTAL_RUNHANDLERPOOLOPTIONS = _RUNOPTIONS_EXPERIMENTAL.nested_types_by_name['RunHandlerPoolOptions'] +_RUNMETADATA = DESCRIPTOR.message_types_by_name['RunMetadata'] +_RUNMETADATA_FUNCTIONGRAPHS = _RUNMETADATA.nested_types_by_name['FunctionGraphs'] +_TENSORCONNECTION = DESCRIPTOR.message_types_by_name['TensorConnection'] +_CALLABLEOPTIONS = DESCRIPTOR.message_types_by_name['CallableOptions'] +_CALLABLEOPTIONS_FEEDDEVICESENTRY = _CALLABLEOPTIONS.nested_types_by_name['FeedDevicesEntry'] +_CALLABLEOPTIONS_FETCHDEVICESENTRY = _CALLABLEOPTIONS.nested_types_by_name['FetchDevicesEntry'] +_OPTIMIZEROPTIONS_LEVEL = _OPTIMIZEROPTIONS.enum_types_by_name['Level'] +_OPTIMIZEROPTIONS_GLOBALJITLEVEL = _OPTIMIZEROPTIONS.enum_types_by_name['GlobalJitLevel'] +_CONFIGPROTO_EXPERIMENTAL_MLIRBRIDGEROLLOUT = _CONFIGPROTO_EXPERIMENTAL.enum_types_by_name['MlirBridgeRollout'] +_RUNOPTIONS_TRACELEVEL = _RUNOPTIONS.enum_types_by_name['TraceLevel'] +GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), { + + 'Experimental' : _reflection.GeneratedProtocolMessageType('Experimental', (_message.Message,), { + + 'VirtualDevices' : _reflection.GeneratedProtocolMessageType('VirtualDevices', (_message.Message,), { + 'DESCRIPTOR' : _GPUOPTIONS_EXPERIMENTAL_VIRTUALDEVICES, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # 
@@protoc_insertion_point(class_scope:tensorboard.GPUOptions.Experimental.VirtualDevices) + }) + , + + 'StreamMergeOptions' : _reflection.GeneratedProtocolMessageType('StreamMergeOptions', (_message.Message,), { + 'DESCRIPTOR' : _GPUOPTIONS_EXPERIMENTAL_STREAMMERGEOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GPUOptions.Experimental.StreamMergeOptions) + }) + , + 'DESCRIPTOR' : _GPUOPTIONS_EXPERIMENTAL, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GPUOptions.Experimental) + }) + , + 'DESCRIPTOR' : _GPUOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GPUOptions) + }) +_sym_db.RegisterMessage(GPUOptions) +_sym_db.RegisterMessage(GPUOptions.Experimental) +_sym_db.RegisterMessage(GPUOptions.Experimental.VirtualDevices) +_sym_db.RegisterMessage(GPUOptions.Experimental.StreamMergeOptions) + +OptimizerOptions = _reflection.GeneratedProtocolMessageType('OptimizerOptions', (_message.Message,), { + 'DESCRIPTOR' : _OPTIMIZEROPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OptimizerOptions) + }) +_sym_db.RegisterMessage(OptimizerOptions) + +GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphOptions) + }) +_sym_db.RegisterMessage(GraphOptions) + +ThreadPoolOptionProto = _reflection.GeneratedProtocolMessageType('ThreadPoolOptionProto', (_message.Message,), { + 'DESCRIPTOR' : _THREADPOOLOPTIONPROTO, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ThreadPoolOptionProto) + }) +_sym_db.RegisterMessage(ThreadPoolOptionProto) + +SessionMetadata = _reflection.GeneratedProtocolMessageType('SessionMetadata', (_message.Message,), { + 'DESCRIPTOR' : _SESSIONMETADATA, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SessionMetadata) + }) +_sym_db.RegisterMessage(SessionMetadata) + +ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), { + + 'DeviceCountEntry' : _reflection.GeneratedProtocolMessageType('DeviceCountEntry', (_message.Message,), { + 'DESCRIPTOR' : _CONFIGPROTO_DEVICECOUNTENTRY, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ConfigProto.DeviceCountEntry) + }) + , + + 'Experimental' : _reflection.GeneratedProtocolMessageType('Experimental', (_message.Message,), { + 'DESCRIPTOR' : _CONFIGPROTO_EXPERIMENTAL, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ConfigProto.Experimental) + }) + , + 'DESCRIPTOR' : _CONFIGPROTO, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ConfigProto) + }) +_sym_db.RegisterMessage(ConfigProto) +_sym_db.RegisterMessage(ConfigProto.DeviceCountEntry) +_sym_db.RegisterMessage(ConfigProto.Experimental) + +RunOptions = _reflection.GeneratedProtocolMessageType('RunOptions', (_message.Message,), { + + 'Experimental' : _reflection.GeneratedProtocolMessageType('Experimental', (_message.Message,), { + + 'RunHandlerPoolOptions' : _reflection.GeneratedProtocolMessageType('RunHandlerPoolOptions', 
(_message.Message,), { + 'DESCRIPTOR' : _RUNOPTIONS_EXPERIMENTAL_RUNHANDLERPOOLOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RunOptions.Experimental.RunHandlerPoolOptions) + }) + , + 'DESCRIPTOR' : _RUNOPTIONS_EXPERIMENTAL, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RunOptions.Experimental) + }) + , + 'DESCRIPTOR' : _RUNOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RunOptions) + }) +_sym_db.RegisterMessage(RunOptions) +_sym_db.RegisterMessage(RunOptions.Experimental) +_sym_db.RegisterMessage(RunOptions.Experimental.RunHandlerPoolOptions) + +RunMetadata = _reflection.GeneratedProtocolMessageType('RunMetadata', (_message.Message,), { + + 'FunctionGraphs' : _reflection.GeneratedProtocolMessageType('FunctionGraphs', (_message.Message,), { + 'DESCRIPTOR' : _RUNMETADATA_FUNCTIONGRAPHS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RunMetadata.FunctionGraphs) + }) + , + 'DESCRIPTOR' : _RUNMETADATA, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RunMetadata) + }) +_sym_db.RegisterMessage(RunMetadata) +_sym_db.RegisterMessage(RunMetadata.FunctionGraphs) + +TensorConnection = _reflection.GeneratedProtocolMessageType('TensorConnection', (_message.Message,), { + 'DESCRIPTOR' : _TENSORCONNECTION, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TensorConnection) + }) +_sym_db.RegisterMessage(TensorConnection) + +CallableOptions = _reflection.GeneratedProtocolMessageType('CallableOptions', (_message.Message,), { + + 'FeedDevicesEntry' : _reflection.GeneratedProtocolMessageType('FeedDevicesEntry', (_message.Message,), { + 'DESCRIPTOR' : _CALLABLEOPTIONS_FEEDDEVICESENTRY, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CallableOptions.FeedDevicesEntry) + }) + , + + 'FetchDevicesEntry' : _reflection.GeneratedProtocolMessageType('FetchDevicesEntry', (_message.Message,), { + 'DESCRIPTOR' : _CALLABLEOPTIONS_FETCHDEVICESENTRY, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CallableOptions.FetchDevicesEntry) + }) + , + 'DESCRIPTOR' : _CALLABLEOPTIONS, + '__module__' : 'tensorboard.compat.proto.config_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CallableOptions) + }) +_sym_db.RegisterMessage(CallableOptions) +_sym_db.RegisterMessage(CallableOptions.FeedDevicesEntry) +_sym_db.RegisterMessage(CallableOptions.FetchDevicesEntry) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\014ConfigProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _CONFIGPROTO_DEVICECOUNTENTRY._options = None + _CONFIGPROTO_DEVICECOUNTENTRY._serialized_options = b'8\001' + _CALLABLEOPTIONS_FEEDDEVICESENTRY._options = None + _CALLABLEOPTIONS_FEEDDEVICESENTRY._serialized_options = b'8\001' + _CALLABLEOPTIONS_FETCHDEVICESENTRY._options = None + _CALLABLEOPTIONS_FETCHDEVICESENTRY._serialized_options = b'8\001' + _GPUOPTIONS._serialized_start=401 + _GPUOPTIONS._serialized_end=1693 + _GPUOPTIONS_EXPERIMENTAL._serialized_start=729 + _GPUOPTIONS_EXPERIMENTAL._serialized_end=1693 + 
_GPUOPTIONS_EXPERIMENTAL_VIRTUALDEVICES._serialized_start=1474 + _GPUOPTIONS_EXPERIMENTAL_VIRTUALDEVICES._serialized_end=1557 + _GPUOPTIONS_EXPERIMENTAL_STREAMMERGEOPTIONS._serialized_start=1560 + _GPUOPTIONS_EXPERIMENTAL_STREAMMERGEOPTIONS._serialized_end=1693 + _OPTIMIZEROPTIONS._serialized_start=1696 + _OPTIMIZEROPTIONS._serialized_end=2111 + _OPTIMIZEROPTIONS_LEVEL._serialized_start=2010 + _OPTIMIZEROPTIONS_LEVEL._serialized_end=2042 + _OPTIMIZEROPTIONS_GLOBALJITLEVEL._serialized_start=2044 + _OPTIMIZEROPTIONS_GLOBALJITLEVEL._serialized_end=2111 + _GRAPHOPTIONS._serialized_start=2114 + _GRAPHOPTIONS._serialized_end=2482 + _THREADPOOLOPTIONPROTO._serialized_start=2484 + _THREADPOOLOPTIONPROTO._serialized_end=2549 + _SESSIONMETADATA._serialized_start=2551 + _SESSIONMETADATA._serialized_end=2599 + _CONFIGPROTO._serialized_start=2602 + _CONFIGPROTO._serialized_end=4763 + _CONFIGPROTO_DEVICECOUNTENTRY._serialized_start=3389 + _CONFIGPROTO_DEVICECOUNTENTRY._serialized_end=3439 + _CONFIGPROTO_EXPERIMENTAL._serialized_start=3442 + _CONFIGPROTO_EXPERIMENTAL._serialized_end=4763 + _CONFIGPROTO_EXPERIMENTAL_MLIRBRIDGEROLLOUT._serialized_start=4517 + _CONFIGPROTO_EXPERIMENTAL_MLIRBRIDGEROLLOUT._serialized_end=4739 + _RUNOPTIONS._serialized_start=4766 + _RUNOPTIONS._serialized_end=5379 + _RUNOPTIONS_EXPERIMENTAL._serialized_start=5078 + _RUNOPTIONS_EXPERIMENTAL._serialized_end=5289 + _RUNOPTIONS_EXPERIMENTAL_RUNHANDLERPOOLOPTIONS._serialized_start=5248 + _RUNOPTIONS_EXPERIMENTAL_RUNHANDLERPOOLOPTIONS._serialized_end=5289 + _RUNOPTIONS_TRACELEVEL._serialized_start=5291 + _RUNOPTIONS_TRACELEVEL._serialized_end=5373 + _RUNMETADATA._serialized_start=5382 + _RUNMETADATA._serialized_end=5836 + _RUNMETADATA_FUNCTIONGRAPHS._serialized_start=5660 + _RUNMETADATA_FUNCTIONGRAPHS._serialized_end=5836 + _TENSORCONNECTION._serialized_start=5838 + _TENSORCONNECTION._serialized_end=5896 + _CALLABLEOPTIONS._serialized_start=5899 + _CALLABLEOPTIONS._serialized_end=6335 + _CALLABLEOPTIONS_FEEDDEVICESENTRY._serialized_start=6232 + _CALLABLEOPTIONS_FEEDDEVICESENTRY._serialized_end=6282 + _CALLABLEOPTIONS_FETCHDEVICESENTRY._serialized_start=6284 + _CALLABLEOPTIONS_FETCHDEVICESENTRY._serialized_end=6335 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cost_graph_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cost_graph_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..661f14807a1a1c4df80d9c48a1a51f9117d4b273 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/cost_graph_pb2.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/cost_graph.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import tensor_shape_pb2 as tensorboard_dot_compat_dot_proto_dot_tensor__shape__pb2 +from tensorboard.compat.proto import types_pb2 as tensorboard_dot_compat_dot_proto_dot_types__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorboard/compat/proto/cost_graph.proto\x12\x0btensorboard\x1a+tensorboard/compat/proto/tensor_shape.proto\x1a$tensorboard/compat/proto/types.proto\"\xd0\x06\n\x0c\x43ostGraphDef\x12,\n\x04node\x18\x01 \x03(\x0b\x32\x1e.tensorboard.CostGraphDef.Node\x12\x36\n\x04\x63ost\x18\x02 \x03(\x0b\x32(.tensorboard.CostGraphDef.AggregatedCost\x1a\xa6\x05\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\x05\x12<\n\ninput_info\x18\x04 \x03(\x0b\x32(.tensorboard.CostGraphDef.Node.InputInfo\x12>\n\x0boutput_info\x18\x05 \x03(\x0b\x32).tensorboard.CostGraphDef.Node.OutputInfo\x12\x1d\n\x15temporary_memory_size\x18\x06 \x01(\x03\x12\x1e\n\x16persistent_memory_size\x18\x0c \x01(\x03\x12!\n\x15host_temp_memory_size\x18\n \x01(\x03\x42\x02\x18\x01\x12#\n\x17\x64\x65vice_temp_memory_size\x18\x0b \x01(\x03\x42\x02\x18\x01\x12)\n\x1d\x64\x65vice_persistent_memory_size\x18\x10 \x01(\x03\x42\x02\x18\x01\x12\x14\n\x0c\x63ompute_cost\x18\t \x01(\x03\x12\x14\n\x0c\x63ompute_time\x18\x0e \x01(\x03\x12\x13\n\x0bmemory_time\x18\x0f \x01(\x03\x12\x10\n\x08is_final\x18\x07 \x01(\x08\x12\x15\n\rcontrol_input\x18\x08 \x03(\x05\x12\x12\n\ninaccurate\x18\x11 \x01(\x08\x1a;\n\tInputInfo\x12\x16\n\x0epreceding_node\x18\x01 \x01(\x05\x12\x16\n\x0epreceding_port\x18\x02 \x01(\x05\x1a\x88\x01\n\nOutputInfo\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x18\n\x10\x61lias_input_port\x18\x02 \x01(\x03\x12,\n\x05shape\x18\x03 \x01(\x0b\x32\x1d.tensorboard.TensorShapeProto\x12$\n\x05\x64type\x18\x04 \x01(\x0e\x32\x15.tensorboard.DataType\x1a\x31\n\x0e\x41ggregatedCost\x12\x0c\n\x04\x63ost\x18\x01 \x01(\x02\x12\x11\n\tdimension\x18\x02 \x01(\tB\x83\x01\n\x18org.tensorflow.frameworkB\x0f\x43ostGraphProtosP\x01ZQgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/cost_graph_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_COSTGRAPHDEF = DESCRIPTOR.message_types_by_name['CostGraphDef'] +_COSTGRAPHDEF_NODE = _COSTGRAPHDEF.nested_types_by_name['Node'] +_COSTGRAPHDEF_NODE_INPUTINFO = _COSTGRAPHDEF_NODE.nested_types_by_name['InputInfo'] +_COSTGRAPHDEF_NODE_OUTPUTINFO = _COSTGRAPHDEF_NODE.nested_types_by_name['OutputInfo'] +_COSTGRAPHDEF_AGGREGATEDCOST = _COSTGRAPHDEF.nested_types_by_name['AggregatedCost'] +CostGraphDef = _reflection.GeneratedProtocolMessageType('CostGraphDef', (_message.Message,), { + + 'Node' : _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), { + + 'InputInfo' : _reflection.GeneratedProtocolMessageType('InputInfo', (_message.Message,), { + 'DESCRIPTOR' : _COSTGRAPHDEF_NODE_INPUTINFO, + '__module__' : 'tensorboard.compat.proto.cost_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CostGraphDef.Node.InputInfo) + }) + , + + 'OutputInfo' : _reflection.GeneratedProtocolMessageType('OutputInfo', 
(_message.Message,), { + 'DESCRIPTOR' : _COSTGRAPHDEF_NODE_OUTPUTINFO, + '__module__' : 'tensorboard.compat.proto.cost_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CostGraphDef.Node.OutputInfo) + }) + , + 'DESCRIPTOR' : _COSTGRAPHDEF_NODE, + '__module__' : 'tensorboard.compat.proto.cost_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CostGraphDef.Node) + }) + , + + 'AggregatedCost' : _reflection.GeneratedProtocolMessageType('AggregatedCost', (_message.Message,), { + 'DESCRIPTOR' : _COSTGRAPHDEF_AGGREGATEDCOST, + '__module__' : 'tensorboard.compat.proto.cost_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CostGraphDef.AggregatedCost) + }) + , + 'DESCRIPTOR' : _COSTGRAPHDEF, + '__module__' : 'tensorboard.compat.proto.cost_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CostGraphDef) + }) +_sym_db.RegisterMessage(CostGraphDef) +_sym_db.RegisterMessage(CostGraphDef.Node) +_sym_db.RegisterMessage(CostGraphDef.Node.InputInfo) +_sym_db.RegisterMessage(CostGraphDef.Node.OutputInfo) +_sym_db.RegisterMessage(CostGraphDef.AggregatedCost) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\017CostGraphProtosP\001ZQgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/cost_graph_go_proto\370\001\001' + _COSTGRAPHDEF_NODE.fields_by_name['host_temp_memory_size']._options = None + _COSTGRAPHDEF_NODE.fields_by_name['host_temp_memory_size']._serialized_options = b'\030\001' + _COSTGRAPHDEF_NODE.fields_by_name['device_temp_memory_size']._options = None + _COSTGRAPHDEF_NODE.fields_by_name['device_temp_memory_size']._serialized_options = b'\030\001' + _COSTGRAPHDEF_NODE.fields_by_name['device_persistent_memory_size']._options = None + _COSTGRAPHDEF_NODE.fields_by_name['device_persistent_memory_size']._serialized_options = b'\030\001' + _COSTGRAPHDEF._serialized_start=142 + _COSTGRAPHDEF._serialized_end=990 + _COSTGRAPHDEF_NODE._serialized_start=261 + _COSTGRAPHDEF_NODE._serialized_end=939 + _COSTGRAPHDEF_NODE_INPUTINFO._serialized_start=741 + _COSTGRAPHDEF_NODE_INPUTINFO._serialized_end=800 + _COSTGRAPHDEF_NODE_OUTPUTINFO._serialized_start=803 + _COSTGRAPHDEF_NODE_OUTPUTINFO._serialized_end=939 + _COSTGRAPHDEF_AGGREGATEDCOST._serialized_start=941 + _COSTGRAPHDEF_AGGREGATEDCOST._serialized_end=990 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/debug_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/debug_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..da788f1a82811f5a9f33c5a5f99afa9369042ed1 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/debug_pb2.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/debug.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$tensorboard/compat/proto/debug.proto\x12\x0btensorboard\"\x8e\x01\n\x10\x44\x65\x62ugTensorWatch\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x13\n\x0boutput_slot\x18\x02 \x01(\x05\x12\x11\n\tdebug_ops\x18\x03 \x03(\t\x12\x12\n\ndebug_urls\x18\x04 \x03(\t\x12+\n#tolerate_debug_op_creation_failures\x18\x05 \x01(\x08\"\x82\x01\n\x0c\x44\x65\x62ugOptions\x12>\n\x17\x64\x65\x62ug_tensor_watch_opts\x18\x04 \x03(\x0b\x32\x1d.tensorboard.DebugTensorWatch\x12\x13\n\x0bglobal_step\x18\n \x01(\x03\x12\x1d\n\x15reset_disk_byte_usage\x18\x0b \x01(\x08\"j\n\x12\x44\x65\x62uggedSourceFile\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x11\n\tfile_path\x18\x02 \x01(\t\x12\x15\n\rlast_modified\x18\x03 \x01(\x03\x12\r\n\x05\x62ytes\x18\x04 \x01(\x03\x12\r\n\x05lines\x18\x05 \x03(\t\"L\n\x13\x44\x65\x62uggedSourceFiles\x12\x35\n\x0csource_files\x18\x01 \x03(\x0b\x32\x1f.tensorboard.DebuggedSourceFileB\x83\x01\n\x18org.tensorflow.frameworkB\x0b\x44\x65\x62ugProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_DEBUGTENSORWATCH = DESCRIPTOR.message_types_by_name['DebugTensorWatch'] +_DEBUGOPTIONS = DESCRIPTOR.message_types_by_name['DebugOptions'] +_DEBUGGEDSOURCEFILE = DESCRIPTOR.message_types_by_name['DebuggedSourceFile'] +_DEBUGGEDSOURCEFILES = DESCRIPTOR.message_types_by_name['DebuggedSourceFiles'] +DebugTensorWatch = _reflection.GeneratedProtocolMessageType('DebugTensorWatch', (_message.Message,), { + 'DESCRIPTOR' : _DEBUGTENSORWATCH, + '__module__' : 'tensorboard.compat.proto.debug_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DebugTensorWatch) + }) +_sym_db.RegisterMessage(DebugTensorWatch) + +DebugOptions = _reflection.GeneratedProtocolMessageType('DebugOptions', (_message.Message,), { + 'DESCRIPTOR' : _DEBUGOPTIONS, + '__module__' : 'tensorboard.compat.proto.debug_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DebugOptions) + }) +_sym_db.RegisterMessage(DebugOptions) + +DebuggedSourceFile = _reflection.GeneratedProtocolMessageType('DebuggedSourceFile', (_message.Message,), { + 'DESCRIPTOR' : _DEBUGGEDSOURCEFILE, + '__module__' : 'tensorboard.compat.proto.debug_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DebuggedSourceFile) + }) +_sym_db.RegisterMessage(DebuggedSourceFile) + +DebuggedSourceFiles = _reflection.GeneratedProtocolMessageType('DebuggedSourceFiles', (_message.Message,), { + 'DESCRIPTOR' : _DEBUGGEDSOURCEFILES, + '__module__' : 'tensorboard.compat.proto.debug_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DebuggedSourceFiles) + }) +_sym_db.RegisterMessage(DebuggedSourceFiles) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\013DebugProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _DEBUGTENSORWATCH._serialized_start=54 + _DEBUGTENSORWATCH._serialized_end=196 + _DEBUGOPTIONS._serialized_start=199 + 
_DEBUGOPTIONS._serialized_end=329 + _DEBUGGEDSOURCEFILE._serialized_start=331 + _DEBUGGEDSOURCEFILE._serialized_end=437 + _DEBUGGEDSOURCEFILES._serialized_start=439 + _DEBUGGEDSOURCEFILES._serialized_end=515 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/event_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/event_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..2cc3a7f9285ef635be7fa90146adc5a7d9b9371e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/event_pb2.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/event.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import summary_pb2 as tensorboard_dot_compat_dot_proto_dot_summary__pb2 +try: + tensorboard_dot_compat_dot_proto_dot_histogram__pb2 = tensorboard_dot_compat_dot_proto_dot_summary__pb2.tensorboard_dot_compat_dot_proto_dot_histogram__pb2 +except AttributeError: + tensorboard_dot_compat_dot_proto_dot_histogram__pb2 = tensorboard_dot_compat_dot_proto_dot_summary__pb2.tensorboard.compat.proto.histogram_pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$tensorboard/compat/proto/event.proto\x12\x0btensorboard\x1a&tensorboard/compat/proto/summary.proto\"\xf9\x02\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x01(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x16\n\x0c\x66ile_version\x18\x03 \x01(\tH\x00\x12\x13\n\tgraph_def\x18\x04 \x01(\x0cH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.tensorboard.SummaryH\x00\x12\x32\n\x0blog_message\x18\x06 \x01(\x0b\x32\x17.tensorboard.LogMessageB\x02\x18\x01H\x00\x12.\n\x0bsession_log\x18\x07 \x01(\x0b\x32\x17.tensorboard.SessionLogH\x00\x12=\n\x13tagged_run_metadata\x18\x08 \x01(\x0b\x32\x1e.tensorboard.TaggedRunMetadataH\x00\x12\x18\n\x0emeta_graph_def\x18\t \x01(\x0cH\x00\x12\x34\n\x0fsource_metadata\x18\n \x01(\x0b\x32\x1b.tensorboard.SourceMetadataB\x06\n\x04what\" \n\x0eSourceMetadata\x12\x0e\n\x06writer\x18\x01 \x01(\t\"\xa2\x01\n\nLogMessage\x12,\n\x05level\x18\x01 \x01(\x0e\x32\x1d.tensorboard.LogMessage.Level\x12\x0f\n\x07message\x18\x02 \x01(\t\"Q\n\x05Level\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tDEBUGGING\x10\n\x12\x08\n\x04INFO\x10\x14\x12\x08\n\x04WARN\x10\x1e\x12\t\n\x05\x45RROR\x10(\x12\t\n\x05\x46\x41TAL\x10\x32\x1a\x02\x18\x01:\x02\x18\x01\"\xb7\x01\n\nSessionLog\x12\x35\n\x06status\x18\x01 \x01(\x0e\x32%.tensorboard.SessionLog.SessionStatus\x12\x17\n\x0f\x63heckpoint_path\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t\"L\n\rSessionStatus\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\t\n\x05START\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\x0e\n\nCHECKPOINT\x10\x03\"6\n\x11TaggedRunMetadata\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12\x14\n\x0crun_metadata\x18\x02 \x01(\x0c\"$\n\x0eWatchdogConfig\x12\x12\n\ntimeout_ms\x18\x01 \x01(\x03\"&\n\x11RequestedExitCode\x12\x11\n\texit_code\x18\x01 \x01(\x05\"\xb9\x01\n\x16WorkerHeartbeatRequest\x12\x36\n\rshutdown_mode\x18\x01 
\x01(\x0e\x32\x1f.tensorboard.WorkerShutdownMode\x12\x34\n\x0fwatchdog_config\x18\x02 \x01(\x0b\x32\x1b.tensorboard.WatchdogConfig\x12\x31\n\texit_code\x18\x03 \x01(\x0b\x32\x1e.tensorboard.RequestedExitCode\"\x85\x01\n\x17WorkerHeartbeatResponse\x12\x30\n\rhealth_status\x18\x01 \x01(\x0e\x32\x19.tensorboard.WorkerHealth\x12&\n\nworker_log\x18\x02 \x03(\x0b\x32\x12.tensorboard.Event\x12\x10\n\x08hostname\x18\x03 \x01(\t*[\n\x0cWorkerHealth\x12\x06\n\x02OK\x10\x00\x12\x1c\n\x18RECEIVED_SHUTDOWN_SIGNAL\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x11\n\rSHUTTING_DOWN\x10\x03*k\n\x12WorkerShutdownMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x12\n\x0eNOT_CONFIGURED\x10\x01\x12\x18\n\x14WAIT_FOR_COORDINATOR\x10\x02\x12\x1a\n\x16SHUTDOWN_AFTER_TIMEOUT\x10\x03\x42p\n\x13org.tensorflow.utilB\x0b\x45ventProtosP\x01ZGgithub.com/tensorflow/tensorflow/tensorflow/go/core/util/event_go_proto\xf8\x01\x01\x62\x06proto3') + +_WORKERHEALTH = DESCRIPTOR.enum_types_by_name['WorkerHealth'] +WorkerHealth = enum_type_wrapper.EnumTypeWrapper(_WORKERHEALTH) +_WORKERSHUTDOWNMODE = DESCRIPTOR.enum_types_by_name['WorkerShutdownMode'] +WorkerShutdownMode = enum_type_wrapper.EnumTypeWrapper(_WORKERSHUTDOWNMODE) +OK = 0 +RECEIVED_SHUTDOWN_SIGNAL = 1 +INTERNAL_ERROR = 2 +SHUTTING_DOWN = 3 +DEFAULT = 0 +NOT_CONFIGURED = 1 +WAIT_FOR_COORDINATOR = 2 +SHUTDOWN_AFTER_TIMEOUT = 3 + + +_EVENT = DESCRIPTOR.message_types_by_name['Event'] +_SOURCEMETADATA = DESCRIPTOR.message_types_by_name['SourceMetadata'] +_LOGMESSAGE = DESCRIPTOR.message_types_by_name['LogMessage'] +_SESSIONLOG = DESCRIPTOR.message_types_by_name['SessionLog'] +_TAGGEDRUNMETADATA = DESCRIPTOR.message_types_by_name['TaggedRunMetadata'] +_WATCHDOGCONFIG = DESCRIPTOR.message_types_by_name['WatchdogConfig'] +_REQUESTEDEXITCODE = DESCRIPTOR.message_types_by_name['RequestedExitCode'] +_WORKERHEARTBEATREQUEST = DESCRIPTOR.message_types_by_name['WorkerHeartbeatRequest'] +_WORKERHEARTBEATRESPONSE = DESCRIPTOR.message_types_by_name['WorkerHeartbeatResponse'] +_LOGMESSAGE_LEVEL = _LOGMESSAGE.enum_types_by_name['Level'] +_SESSIONLOG_SESSIONSTATUS = _SESSIONLOG.enum_types_by_name['SessionStatus'] +Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), { + 'DESCRIPTOR' : _EVENT, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.Event) + }) +_sym_db.RegisterMessage(Event) + +SourceMetadata = _reflection.GeneratedProtocolMessageType('SourceMetadata', (_message.Message,), { + 'DESCRIPTOR' : _SOURCEMETADATA, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SourceMetadata) + }) +_sym_db.RegisterMessage(SourceMetadata) + +LogMessage = _reflection.GeneratedProtocolMessageType('LogMessage', (_message.Message,), { + 'DESCRIPTOR' : _LOGMESSAGE, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.LogMessage) + }) +_sym_db.RegisterMessage(LogMessage) + +SessionLog = _reflection.GeneratedProtocolMessageType('SessionLog', (_message.Message,), { + 'DESCRIPTOR' : _SESSIONLOG, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SessionLog) + }) +_sym_db.RegisterMessage(SessionLog) + +TaggedRunMetadata = _reflection.GeneratedProtocolMessageType('TaggedRunMetadata', (_message.Message,), { + 'DESCRIPTOR' : _TAGGEDRUNMETADATA, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # 
@@protoc_insertion_point(class_scope:tensorboard.TaggedRunMetadata) + }) +_sym_db.RegisterMessage(TaggedRunMetadata) + +WatchdogConfig = _reflection.GeneratedProtocolMessageType('WatchdogConfig', (_message.Message,), { + 'DESCRIPTOR' : _WATCHDOGCONFIG, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.WatchdogConfig) + }) +_sym_db.RegisterMessage(WatchdogConfig) + +RequestedExitCode = _reflection.GeneratedProtocolMessageType('RequestedExitCode', (_message.Message,), { + 'DESCRIPTOR' : _REQUESTEDEXITCODE, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RequestedExitCode) + }) +_sym_db.RegisterMessage(RequestedExitCode) + +WorkerHeartbeatRequest = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatRequest', (_message.Message,), { + 'DESCRIPTOR' : _WORKERHEARTBEATREQUEST, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatRequest) + }) +_sym_db.RegisterMessage(WorkerHeartbeatRequest) + +WorkerHeartbeatResponse = _reflection.GeneratedProtocolMessageType('WorkerHeartbeatResponse', (_message.Message,), { + 'DESCRIPTOR' : _WORKERHEARTBEATRESPONSE, + '__module__' : 'tensorboard.compat.proto.event_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.WorkerHeartbeatResponse) + }) +_sym_db.RegisterMessage(WorkerHeartbeatResponse) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023org.tensorflow.utilB\013EventProtosP\001ZGgithub.com/tensorflow/tensorflow/tensorflow/go/core/util/event_go_proto\370\001\001' + _EVENT.fields_by_name['log_message']._options = None + _EVENT.fields_by_name['log_message']._serialized_options = b'\030\001' + _LOGMESSAGE_LEVEL._options = None + _LOGMESSAGE_LEVEL._serialized_options = b'\030\001' + _LOGMESSAGE._options = None + _LOGMESSAGE._serialized_options = b'\030\001' + _WORKERHEALTH._serialized_start=1316 + _WORKERHEALTH._serialized_end=1407 + _WORKERSHUTDOWNMODE._serialized_start=1409 + _WORKERSHUTDOWNMODE._serialized_end=1516 + _EVENT._serialized_start=94 + _EVENT._serialized_end=471 + _SOURCEMETADATA._serialized_start=473 + _SOURCEMETADATA._serialized_end=505 + _LOGMESSAGE._serialized_start=508 + _LOGMESSAGE._serialized_end=670 + _LOGMESSAGE_LEVEL._serialized_start=585 + _LOGMESSAGE_LEVEL._serialized_end=666 + _SESSIONLOG._serialized_start=673 + _SESSIONLOG._serialized_end=856 + _SESSIONLOG_SESSIONSTATUS._serialized_start=780 + _SESSIONLOG_SESSIONSTATUS._serialized_end=856 + _TAGGEDRUNMETADATA._serialized_start=858 + _TAGGEDRUNMETADATA._serialized_end=912 + _WATCHDOGCONFIG._serialized_start=914 + _WATCHDOGCONFIG._serialized_end=950 + _REQUESTEDEXITCODE._serialized_start=952 + _REQUESTEDEXITCODE._serialized_end=990 + _WORKERHEARTBEATREQUEST._serialized_start=993 + _WORKERHEARTBEATREQUEST._serialized_end=1178 + _WORKERHEARTBEATRESPONSE._serialized_start=1181 + _WORKERHEARTBEATRESPONSE._serialized_end=1314 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/full_type_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/full_type_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..97cce233f7dc538f3b7736e57c6beead12b7a01f --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/full_type_pb2.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol 
buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/full_type.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(tensorboard/compat/proto/full_type.proto\x12\x0btensorboard\"\x81\x01\n\x0b\x46ullTypeDef\x12(\n\x07type_id\x18\x01 \x01(\x0e\x32\x17.tensorboard.FullTypeId\x12&\n\x04\x61rgs\x18\x02 \x03(\x0b\x32\x18.tensorboard.FullTypeDef\x12\x0b\n\x01s\x18\x03 \x01(\tH\x00\x12\x0b\n\x01i\x18\x04 \x01(\x03H\x00\x42\x06\n\x04\x61ttr*\xda\x04\n\nFullTypeId\x12\r\n\tTFT_UNSET\x10\x00\x12\x0b\n\x07TFT_VAR\x10\x01\x12\x0b\n\x07TFT_ANY\x10\x02\x12\x0f\n\x0bTFT_PRODUCT\x10\x03\x12\r\n\tTFT_NAMED\x10\x04\x12\x10\n\x0cTFT_FOR_EACH\x10\x14\x12\x10\n\x0cTFT_CALLABLE\x10\x64\x12\x0f\n\nTFT_TENSOR\x10\xe8\x07\x12\x0e\n\tTFT_ARRAY\x10\xe9\x07\x12\x11\n\x0cTFT_OPTIONAL\x10\xea\x07\x12\x10\n\x0bTFT_LITERAL\x10\xeb\x07\x12\x10\n\x0bTFT_ENCODED\x10\xec\x07\x12\x15\n\x10TFT_SHAPE_TENSOR\x10\xed\x07\x12\r\n\x08TFT_BOOL\x10\xc8\x01\x12\x0e\n\tTFT_UINT8\x10\xc9\x01\x12\x0f\n\nTFT_UINT16\x10\xca\x01\x12\x0f\n\nTFT_UINT32\x10\xcb\x01\x12\x0f\n\nTFT_UINT64\x10\xcc\x01\x12\r\n\x08TFT_INT8\x10\xcd\x01\x12\x0e\n\tTFT_INT16\x10\xce\x01\x12\x0e\n\tTFT_INT32\x10\xcf\x01\x12\x0e\n\tTFT_INT64\x10\xd0\x01\x12\r\n\x08TFT_HALF\x10\xd1\x01\x12\x0e\n\tTFT_FLOAT\x10\xd2\x01\x12\x0f\n\nTFT_DOUBLE\x10\xd3\x01\x12\x11\n\x0cTFT_BFLOAT16\x10\xd7\x01\x12\x12\n\rTFT_COMPLEX64\x10\xd4\x01\x12\x13\n\x0eTFT_COMPLEX128\x10\xd5\x01\x12\x0f\n\nTFT_STRING\x10\xd6\x01\x12\x10\n\x0bTFT_DATASET\x10\xf6N\x12\x0f\n\nTFT_RAGGED\x10\xf7N\x12\x11\n\x0cTFT_ITERATOR\x10\xf8N\x12\x13\n\x0eTFT_MUTEX_LOCK\x10\xdaO\x12\x17\n\x12TFT_LEGACY_VARIANT\x10\xdbOB\x81\x01\n\x18org.tensorflow.frameworkB\x0e\x46ullTypeProtosP\x01ZPgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/full_type_go_proto\xf8\x01\x01\x62\x06proto3') + +_FULLTYPEID = DESCRIPTOR.enum_types_by_name['FullTypeId'] +FullTypeId = enum_type_wrapper.EnumTypeWrapper(_FULLTYPEID) +TFT_UNSET = 0 +TFT_VAR = 1 +TFT_ANY = 2 +TFT_PRODUCT = 3 +TFT_NAMED = 4 +TFT_FOR_EACH = 20 +TFT_CALLABLE = 100 +TFT_TENSOR = 1000 +TFT_ARRAY = 1001 +TFT_OPTIONAL = 1002 +TFT_LITERAL = 1003 +TFT_ENCODED = 1004 +TFT_SHAPE_TENSOR = 1005 +TFT_BOOL = 200 +TFT_UINT8 = 201 +TFT_UINT16 = 202 +TFT_UINT32 = 203 +TFT_UINT64 = 204 +TFT_INT8 = 205 +TFT_INT16 = 206 +TFT_INT32 = 207 +TFT_INT64 = 208 +TFT_HALF = 209 +TFT_FLOAT = 210 +TFT_DOUBLE = 211 +TFT_BFLOAT16 = 215 +TFT_COMPLEX64 = 212 +TFT_COMPLEX128 = 213 +TFT_STRING = 214 +TFT_DATASET = 10102 +TFT_RAGGED = 10103 +TFT_ITERATOR = 10104 +TFT_MUTEX_LOCK = 10202 +TFT_LEGACY_VARIANT = 10203 + + +_FULLTYPEDEF = DESCRIPTOR.message_types_by_name['FullTypeDef'] +FullTypeDef = _reflection.GeneratedProtocolMessageType('FullTypeDef', (_message.Message,), { + 'DESCRIPTOR' : _FULLTYPEDEF, + '__module__' : 'tensorboard.compat.proto.full_type_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FullTypeDef) + }) +_sym_db.RegisterMessage(FullTypeDef) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\030org.tensorflow.frameworkB\016FullTypeProtosP\001ZPgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/full_type_go_proto\370\001\001' + _FULLTYPEID._serialized_start=190 + _FULLTYPEID._serialized_end=792 + _FULLTYPEDEF._serialized_start=58 + _FULLTYPEDEF._serialized_end=187 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/function_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/function_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..c746ea251e152c771f8a092c53655f056d0c53f0 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/function_pb2.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/function.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import attr_value_pb2 as tensorboard_dot_compat_dot_proto_dot_attr__value__pb2 +from tensorboard.compat.proto import node_def_pb2 as tensorboard_dot_compat_dot_proto_dot_node__def__pb2 +from tensorboard.compat.proto import op_def_pb2 as tensorboard_dot_compat_dot_proto_dot_op__def__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'tensorboard/compat/proto/function.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/attr_value.proto\x1a\'tensorboard/compat/proto/node_def.proto\x1a%tensorboard/compat/proto/op_def.proto\"\xab\x01\n\x12\x46unctionDefLibrary\x12*\n\x08\x66unction\x18\x01 \x03(\x0b\x32\x18.tensorboard.FunctionDef\x12*\n\x08gradient\x18\x02 \x03(\x0b\x32\x18.tensorboard.GradientDef\x12=\n\x14registered_gradients\x18\x03 \x03(\x0b\x32\x1f.tensorboard.RegisteredGradient\"\xcf\x06\n\x0b\x46unctionDef\x12%\n\tsignature\x18\x01 \x01(\x0b\x32\x12.tensorboard.OpDef\x12\x30\n\x04\x61ttr\x18\x05 \x03(\x0b\x32\".tensorboard.FunctionDef.AttrEntry\x12\x37\n\x08\x61rg_attr\x18\x07 \x03(\x0b\x32%.tensorboard.FunctionDef.ArgAttrEntry\x12Q\n\x16resource_arg_unique_id\x18\x08 \x03(\x0b\x32\x31.tensorboard.FunctionDef.ResourceArgUniqueIdEntry\x12&\n\x08node_def\x18\x03 \x03(\x0b\x32\x14.tensorboard.NodeDef\x12.\n\x03ret\x18\x04 \x03(\x0b\x32!.tensorboard.FunctionDef.RetEntry\x12=\n\x0b\x63ontrol_ret\x18\x06 \x03(\x0b\x32(.tensorboard.FunctionDef.ControlRetEntry\x1a\x43\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorboard.AttrValue:\x02\x38\x01\x1a\x8a\x01\n\x08\x41rgAttrs\x12\x39\n\x04\x61ttr\x18\x01 \x03(\x0b\x32+.tensorboard.FunctionDef.ArgAttrs.AttrEntry\x1a\x43\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorboard.AttrValue:\x02\x38\x01\x1aQ\n\x0c\x41rgAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.tensorboard.FunctionDef.ArgAttrs:\x02\x38\x01\x1a:\n\x18ResourceArgUniqueIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\x1a*\n\x08RetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0f\x43ontrolRetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01J\x04\x08\x02\x10\x03\";\n\x0bGradientDef\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rgradient_func\x18\x02 \x01(\t\"G\n\x12RegisteredGradient\x12\x15\n\rgradient_func\x18\x01 \x01(\t\x12\x1a\n\x12registered_op_type\x18\x02 \x01(\tB\x80\x01\n\x18org.tensorflow.frameworkB\x0e\x46unctionProtosP\x01ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/function_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_FUNCTIONDEFLIBRARY = DESCRIPTOR.message_types_by_name['FunctionDefLibrary'] +_FUNCTIONDEF = DESCRIPTOR.message_types_by_name['FunctionDef'] +_FUNCTIONDEF_ATTRENTRY = _FUNCTIONDEF.nested_types_by_name['AttrEntry'] +_FUNCTIONDEF_ARGATTRS = _FUNCTIONDEF.nested_types_by_name['ArgAttrs'] +_FUNCTIONDEF_ARGATTRS_ATTRENTRY = _FUNCTIONDEF_ARGATTRS.nested_types_by_name['AttrEntry'] +_FUNCTIONDEF_ARGATTRENTRY = _FUNCTIONDEF.nested_types_by_name['ArgAttrEntry'] +_FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY = _FUNCTIONDEF.nested_types_by_name['ResourceArgUniqueIdEntry'] +_FUNCTIONDEF_RETENTRY = _FUNCTIONDEF.nested_types_by_name['RetEntry'] +_FUNCTIONDEF_CONTROLRETENTRY = _FUNCTIONDEF.nested_types_by_name['ControlRetEntry'] +_GRADIENTDEF = DESCRIPTOR.message_types_by_name['GradientDef'] +_REGISTEREDGRADIENT = DESCRIPTOR.message_types_by_name['RegisteredGradient'] +FunctionDefLibrary = _reflection.GeneratedProtocolMessageType('FunctionDefLibrary', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEFLIBRARY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDefLibrary) + }) +_sym_db.RegisterMessage(FunctionDefLibrary) + +FunctionDef = _reflection.GeneratedProtocolMessageType('FunctionDef', (_message.Message,), { + + 'AttrEntry' : _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_ATTRENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.AttrEntry) + }) + , + + 'ArgAttrs' : _reflection.GeneratedProtocolMessageType('ArgAttrs', (_message.Message,), { + + 'AttrEntry' : _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_ARGATTRS_ATTRENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.ArgAttrs.AttrEntry) + }) + , + 'DESCRIPTOR' : _FUNCTIONDEF_ARGATTRS, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.ArgAttrs) + }) + , + + 'ArgAttrEntry' : _reflection.GeneratedProtocolMessageType('ArgAttrEntry', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_ARGATTRENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.ArgAttrEntry) + }) + , + + 'ResourceArgUniqueIdEntry' : _reflection.GeneratedProtocolMessageType('ResourceArgUniqueIdEntry', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.ResourceArgUniqueIdEntry) + }) + , + + 'RetEntry' : _reflection.GeneratedProtocolMessageType('RetEntry', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_RETENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.RetEntry) + }) + , + + 'ControlRetEntry' : _reflection.GeneratedProtocolMessageType('ControlRetEntry', 
(_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONDEF_CONTROLRETENTRY, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef.ControlRetEntry) + }) + , + 'DESCRIPTOR' : _FUNCTIONDEF, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionDef) + }) +_sym_db.RegisterMessage(FunctionDef) +_sym_db.RegisterMessage(FunctionDef.AttrEntry) +_sym_db.RegisterMessage(FunctionDef.ArgAttrs) +_sym_db.RegisterMessage(FunctionDef.ArgAttrs.AttrEntry) +_sym_db.RegisterMessage(FunctionDef.ArgAttrEntry) +_sym_db.RegisterMessage(FunctionDef.ResourceArgUniqueIdEntry) +_sym_db.RegisterMessage(FunctionDef.RetEntry) +_sym_db.RegisterMessage(FunctionDef.ControlRetEntry) + +GradientDef = _reflection.GeneratedProtocolMessageType('GradientDef', (_message.Message,), { + 'DESCRIPTOR' : _GRADIENTDEF, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GradientDef) + }) +_sym_db.RegisterMessage(GradientDef) + +RegisteredGradient = _reflection.GeneratedProtocolMessageType('RegisteredGradient', (_message.Message,), { + 'DESCRIPTOR' : _REGISTEREDGRADIENT, + '__module__' : 'tensorboard.compat.proto.function_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RegisteredGradient) + }) +_sym_db.RegisterMessage(RegisteredGradient) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\016FunctionProtosP\001ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/function_go_proto\370\001\001' + _FUNCTIONDEF_ATTRENTRY._options = None + _FUNCTIONDEF_ATTRENTRY._serialized_options = b'8\001' + _FUNCTIONDEF_ARGATTRS_ATTRENTRY._options = None + _FUNCTIONDEF_ARGATTRS_ATTRENTRY._serialized_options = b'8\001' + _FUNCTIONDEF_ARGATTRENTRY._options = None + _FUNCTIONDEF_ARGATTRENTRY._serialized_options = b'8\001' + _FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY._options = None + _FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY._serialized_options = b'8\001' + _FUNCTIONDEF_RETENTRY._options = None + _FUNCTIONDEF_RETENTRY._serialized_options = b'8\001' + _FUNCTIONDEF_CONTROLRETENTRY._options = None + _FUNCTIONDEF_CONTROLRETENTRY._serialized_options = b'8\001' + _FUNCTIONDEFLIBRARY._serialized_start=180 + _FUNCTIONDEFLIBRARY._serialized_end=351 + _FUNCTIONDEF._serialized_start=354 + _FUNCTIONDEF._serialized_end=1201 + _FUNCTIONDEF_ATTRENTRY._serialized_start=749 + _FUNCTIONDEF_ATTRENTRY._serialized_end=816 + _FUNCTIONDEF_ARGATTRS._serialized_start=819 + _FUNCTIONDEF_ARGATTRS._serialized_end=957 + _FUNCTIONDEF_ARGATTRS_ATTRENTRY._serialized_start=749 + _FUNCTIONDEF_ARGATTRS_ATTRENTRY._serialized_end=816 + _FUNCTIONDEF_ARGATTRENTRY._serialized_start=959 + _FUNCTIONDEF_ARGATTRENTRY._serialized_end=1040 + _FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY._serialized_start=1042 + _FUNCTIONDEF_RESOURCEARGUNIQUEIDENTRY._serialized_end=1100 + _FUNCTIONDEF_RETENTRY._serialized_start=1102 + _FUNCTIONDEF_RETENTRY._serialized_end=1144 + _FUNCTIONDEF_CONTROLRETENTRY._serialized_start=1146 + _FUNCTIONDEF_CONTROLRETENTRY._serialized_end=1195 + _GRADIENTDEF._serialized_start=1203 + _GRADIENTDEF._serialized_end=1262 + _REGISTEREDGRADIENT._serialized_start=1264 + _REGISTEREDGRADIENT._serialized_end=1335 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_debug_info_pb2.py 
b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_debug_info_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..22f8e6b94c80ccc4def429b46853654414ae14e2 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_debug_info_pb2.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/graph_debug_info.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/tensorboard/compat/proto/graph_debug_info.proto\x12\x0btensorboard\"\xab\x06\n\x0eGraphDebugInfo\x12\r\n\x05\x66iles\x18\x01 \x03(\t\x12\x41\n\x0c\x66rames_by_id\x18\x04 \x03(\x0b\x32+.tensorboard.GraphDebugInfo.FramesByIdEntry\x12\x41\n\x0ctraces_by_id\x18\x06 \x03(\x0b\x32+.tensorboard.GraphDebugInfo.TracesByIdEntry\x12\x37\n\x06traces\x18\x02 \x03(\x0b\x32\'.tensorboard.GraphDebugInfo.TracesEntry\x12H\n\x10name_to_trace_id\x18\x05 \x03(\x0b\x32..tensorboard.GraphDebugInfo.NameToTraceIdEntry\x1aX\n\x0b\x46ileLineCol\x12\x12\n\nfile_index\x18\x01 \x01(\x05\x12\x0c\n\x04line\x18\x02 \x01(\x05\x12\x0b\n\x03\x63ol\x18\x03 \x01(\x05\x12\x0c\n\x04\x66unc\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\t\x1a\x63\n\nStackTrace\x12?\n\x0e\x66ile_line_cols\x18\x01 \x03(\x0b\x32\'.tensorboard.GraphDebugInfo.FileLineCol\x12\x14\n\x08\x66rame_id\x18\x02 \x03(\x06\x42\x02\x10\x01\x1aZ\n\x0f\x46ramesByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\x06\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.tensorboard.GraphDebugInfo.FileLineCol:\x02\x38\x01\x1aY\n\x0fTracesByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\x06\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.tensorboard.GraphDebugInfo.StackTrace:\x02\x38\x01\x1aU\n\x0bTracesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.tensorboard.GraphDebugInfo.StackTrace:\x02\x38\x01\x1a\x34\n\x12NameToTraceIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x06:\x02\x38\x01\x42\x8c\x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01') + + + +_GRAPHDEBUGINFO = DESCRIPTOR.message_types_by_name['GraphDebugInfo'] +_GRAPHDEBUGINFO_FILELINECOL = _GRAPHDEBUGINFO.nested_types_by_name['FileLineCol'] +_GRAPHDEBUGINFO_STACKTRACE = _GRAPHDEBUGINFO.nested_types_by_name['StackTrace'] +_GRAPHDEBUGINFO_FRAMESBYIDENTRY = _GRAPHDEBUGINFO.nested_types_by_name['FramesByIdEntry'] +_GRAPHDEBUGINFO_TRACESBYIDENTRY = _GRAPHDEBUGINFO.nested_types_by_name['TracesByIdEntry'] +_GRAPHDEBUGINFO_TRACESENTRY = _GRAPHDEBUGINFO.nested_types_by_name['TracesEntry'] +_GRAPHDEBUGINFO_NAMETOTRACEIDENTRY = _GRAPHDEBUGINFO.nested_types_by_name['NameToTraceIdEntry'] +GraphDebugInfo = _reflection.GeneratedProtocolMessageType('GraphDebugInfo', (_message.Message,), { + + 'FileLineCol' : _reflection.GeneratedProtocolMessageType('FileLineCol', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_FILELINECOL, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # 
@@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.FileLineCol) + }) + , + + 'StackTrace' : _reflection.GeneratedProtocolMessageType('StackTrace', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_STACKTRACE, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.StackTrace) + }) + , + + 'FramesByIdEntry' : _reflection.GeneratedProtocolMessageType('FramesByIdEntry', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_FRAMESBYIDENTRY, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.FramesByIdEntry) + }) + , + + 'TracesByIdEntry' : _reflection.GeneratedProtocolMessageType('TracesByIdEntry', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_TRACESBYIDENTRY, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.TracesByIdEntry) + }) + , + + 'TracesEntry' : _reflection.GeneratedProtocolMessageType('TracesEntry', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_TRACESENTRY, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.TracesEntry) + }) + , + + 'NameToTraceIdEntry' : _reflection.GeneratedProtocolMessageType('NameToTraceIdEntry', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEBUGINFO_NAMETOTRACEIDENTRY, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo.NameToTraceIdEntry) + }) + , + 'DESCRIPTOR' : _GRAPHDEBUGINFO, + '__module__' : 'tensorboard.compat.proto.graph_debug_info_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDebugInfo) + }) +_sym_db.RegisterMessage(GraphDebugInfo) +_sym_db.RegisterMessage(GraphDebugInfo.FileLineCol) +_sym_db.RegisterMessage(GraphDebugInfo.StackTrace) +_sym_db.RegisterMessage(GraphDebugInfo.FramesByIdEntry) +_sym_db.RegisterMessage(GraphDebugInfo.TracesByIdEntry) +_sym_db.RegisterMessage(GraphDebugInfo.TracesEntry) +_sym_db.RegisterMessage(GraphDebugInfo.NameToTraceIdEntry) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\024GraphDebugInfoProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _GRAPHDEBUGINFO_STACKTRACE.fields_by_name['frame_id']._options = None + _GRAPHDEBUGINFO_STACKTRACE.fields_by_name['frame_id']._serialized_options = b'\020\001' + _GRAPHDEBUGINFO_FRAMESBYIDENTRY._options = None + _GRAPHDEBUGINFO_FRAMESBYIDENTRY._serialized_options = b'8\001' + _GRAPHDEBUGINFO_TRACESBYIDENTRY._options = None + _GRAPHDEBUGINFO_TRACESBYIDENTRY._serialized_options = b'8\001' + _GRAPHDEBUGINFO_TRACESENTRY._options = None + _GRAPHDEBUGINFO_TRACESENTRY._serialized_options = b'8\001' + _GRAPHDEBUGINFO_NAMETOTRACEIDENTRY._options = None + _GRAPHDEBUGINFO_NAMETOTRACEIDENTRY._serialized_options = b'8\001' + _GRAPHDEBUGINFO._serialized_start=65 + _GRAPHDEBUGINFO._serialized_end=876 + _GRAPHDEBUGINFO_FILELINECOL._serialized_start=363 + _GRAPHDEBUGINFO_FILELINECOL._serialized_end=451 + _GRAPHDEBUGINFO_STACKTRACE._serialized_start=453 + _GRAPHDEBUGINFO_STACKTRACE._serialized_end=552 + _GRAPHDEBUGINFO_FRAMESBYIDENTRY._serialized_start=554 + _GRAPHDEBUGINFO_FRAMESBYIDENTRY._serialized_end=644 + _GRAPHDEBUGINFO_TRACESBYIDENTRY._serialized_start=646 + 
_GRAPHDEBUGINFO_TRACESBYIDENTRY._serialized_end=735 + _GRAPHDEBUGINFO_TRACESENTRY._serialized_start=737 + _GRAPHDEBUGINFO_TRACESENTRY._serialized_end=822 + _GRAPHDEBUGINFO_NAMETOTRACEIDENTRY._serialized_start=824 + _GRAPHDEBUGINFO_NAMETOTRACEIDENTRY._serialized_end=876 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..050ddad9ecb246d28a55ea3a458534aed541493f --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/graph_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/graph.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import function_pb2 as tensorboard_dot_compat_dot_proto_dot_function__pb2 +from tensorboard.compat.proto import graph_debug_info_pb2 as tensorboard_dot_compat_dot_proto_dot_graph__debug__info__pb2 +from tensorboard.compat.proto import node_def_pb2 as tensorboard_dot_compat_dot_proto_dot_node__def__pb2 +from tensorboard.compat.proto import versions_pb2 as tensorboard_dot_compat_dot_proto_dot_versions__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$tensorboard/compat/proto/graph.proto\x12\x0btensorboard\x1a\'tensorboard/compat/proto/function.proto\x1a/tensorboard/compat/proto/graph_debug_info.proto\x1a\'tensorboard/compat/proto/node_def.proto\x1a\'tensorboard/compat/proto/versions.proto\"\xd1\x01\n\x08GraphDef\x12\"\n\x04node\x18\x01 \x03(\x0b\x32\x14.tensorboard.NodeDef\x12)\n\x08versions\x18\x04 \x01(\x0b\x32\x17.tensorboard.VersionDef\x12\x13\n\x07version\x18\x03 \x01(\x05\x42\x02\x18\x01\x12\x30\n\x07library\x18\x02 \x01(\x0b\x32\x1f.tensorboard.FunctionDefLibrary\x12/\n\ndebug_info\x18\x05 \x01(\x0b\x32\x1b.tensorboard.GraphDebugInfoBz\n\x18org.tensorflow.frameworkB\x0bGraphProtosP\x01ZLgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/graph_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_GRAPHDEF = DESCRIPTOR.message_types_by_name['GraphDef'] +GraphDef = _reflection.GeneratedProtocolMessageType('GraphDef', (_message.Message,), { + 'DESCRIPTOR' : _GRAPHDEF, + '__module__' : 'tensorboard.compat.proto.graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.GraphDef) + }) +_sym_db.RegisterMessage(GraphDef) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\013GraphProtosP\001ZLgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/graph_go_proto\370\001\001' + _GRAPHDEF.fields_by_name['version']._options = None + _GRAPHDEF.fields_by_name['version']._serialized_options = b'\030\001' + _GRAPHDEF._serialized_start=226 + _GRAPHDEF._serialized_end=435 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/histogram_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/histogram_pb2.py new file mode 
100644 index 0000000000000000000000000000000000000000..f4f37195ada91a0733f3178213fc9b1000615512 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/histogram_pb2.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/histogram.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(tensorboard/compat/proto/histogram.proto\x12\x0btensorboard\"\x87\x01\n\x0eHistogramProto\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\x0b\n\x03num\x18\x03 \x01(\x01\x12\x0b\n\x03sum\x18\x04 \x01(\x01\x12\x13\n\x0bsum_squares\x18\x05 \x01(\x01\x12\x18\n\x0c\x62ucket_limit\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x12\n\x06\x62ucket\x18\x07 \x03(\x01\x42\x02\x10\x01\x42\\\n\x18org.tensorflow.frameworkP\x01Z;github.com/google/tsl/tsl/go/core/protobuf/summary_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_HISTOGRAMPROTO = DESCRIPTOR.message_types_by_name['HistogramProto'] +HistogramProto = _reflection.GeneratedProtocolMessageType('HistogramProto', (_message.Message,), { + 'DESCRIPTOR' : _HISTOGRAMPROTO, + '__module__' : 'tensorboard.compat.proto.histogram_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.HistogramProto) + }) +_sym_db.RegisterMessage(HistogramProto) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkP\001Z;github.com/google/tsl/tsl/go/core/protobuf/summary_go_proto\370\001\001' + _HISTOGRAMPROTO.fields_by_name['bucket_limit']._options = None + _HISTOGRAMPROTO.fields_by_name['bucket_limit']._serialized_options = b'\020\001' + _HISTOGRAMPROTO.fields_by_name['bucket']._options = None + _HISTOGRAMPROTO.fields_by_name['bucket']._serialized_options = b'\020\001' + _HISTOGRAMPROTO._serialized_start=58 + _HISTOGRAMPROTO._serialized_end=193 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/node_def_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/node_def_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5474d0336911732a65baa43e3e41b1f967a591 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/node_def_pb2.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/node_def.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import attr_value_pb2 as tensorboard_dot_compat_dot_proto_dot_attr__value__pb2 +from tensorboard.compat.proto import full_type_pb2 as tensorboard_dot_compat_dot_proto_dot_full__type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'tensorboard/compat/proto/node_def.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/attr_value.proto\x1a(tensorboard/compat/proto/full_type.proto\"\x8a\x03\n\x07NodeDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02op\x18\x02 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12\x0e\n\x06\x64\x65vice\x18\x04 \x01(\t\x12,\n\x04\x61ttr\x18\x05 \x03(\x0b\x32\x1e.tensorboard.NodeDef.AttrEntry\x12K\n\x17\x65xperimental_debug_info\x18\x06 \x01(\x0b\x32*.tensorboard.NodeDef.ExperimentalDebugInfo\x12\x33\n\x11\x65xperimental_type\x18\x07 \x01(\x0b\x32\x18.tensorboard.FullTypeDef\x1a\x43\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorboard.AttrValue:\x02\x38\x01\x1aQ\n\x15\x45xperimentalDebugInfo\x12\x1b\n\x13original_node_names\x18\x01 \x03(\t\x12\x1b\n\x13original_func_names\x18\x02 \x03(\tB{\n\x18org.tensorflow.frameworkB\tNodeProtoP\x01ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/node_def_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_NODEDEF = DESCRIPTOR.message_types_by_name['NodeDef'] +_NODEDEF_ATTRENTRY = _NODEDEF.nested_types_by_name['AttrEntry'] +_NODEDEF_EXPERIMENTALDEBUGINFO = _NODEDEF.nested_types_by_name['ExperimentalDebugInfo'] +NodeDef = _reflection.GeneratedProtocolMessageType('NodeDef', (_message.Message,), { + + 'AttrEntry' : _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), { + 'DESCRIPTOR' : _NODEDEF_ATTRENTRY, + '__module__' : 'tensorboard.compat.proto.node_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NodeDef.AttrEntry) + }) + , + + 'ExperimentalDebugInfo' : _reflection.GeneratedProtocolMessageType('ExperimentalDebugInfo', (_message.Message,), { + 'DESCRIPTOR' : _NODEDEF_EXPERIMENTALDEBUGINFO, + '__module__' : 'tensorboard.compat.proto.node_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NodeDef.ExperimentalDebugInfo) + }) + , + 'DESCRIPTOR' : _NODEDEF, + '__module__' : 'tensorboard.compat.proto.node_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NodeDef) + }) +_sym_db.RegisterMessage(NodeDef) +_sym_db.RegisterMessage(NodeDef.AttrEntry) +_sym_db.RegisterMessage(NodeDef.ExperimentalDebugInfo) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\tNodeProtoP\001ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/node_def_go_proto\370\001\001' + _NODEDEF_ATTRENTRY._options = None + _NODEDEF_ATTRENTRY._serialized_options = b'8\001' + _NODEDEF._serialized_start=142 + _NODEDEF._serialized_end=536 + _NODEDEF_ATTRENTRY._serialized_start=386 + _NODEDEF_ATTRENTRY._serialized_end=453 + _NODEDEF_EXPERIMENTALDEBUGINFO._serialized_start=455 + _NODEDEF_EXPERIMENTALDEBUGINFO._serialized_end=536 +# 
@@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/op_def_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/op_def_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5d3891004e08dff7e089aa009f37e4d893b8f5 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/op_def_pb2.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/op_def.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import attr_value_pb2 as tensorboard_dot_compat_dot_proto_dot_attr__value__pb2 +from tensorboard.compat.proto import full_type_pb2 as tensorboard_dot_compat_dot_proto_dot_full__type__pb2 +from tensorboard.compat.proto import resource_handle_pb2 as tensorboard_dot_compat_dot_proto_dot_resource__handle__pb2 +from tensorboard.compat.proto import types_pb2 as tensorboard_dot_compat_dot_proto_dot_types__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%tensorboard/compat/proto/op_def.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/attr_value.proto\x1a(tensorboard/compat/proto/full_type.proto\x1a.tensorboard/compat/proto/resource_handle.proto\x1a$tensorboard/compat/proto/types.proto\"\xfc\x06\n\x05OpDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\tinput_arg\x18\x02 \x03(\x0b\x32\x19.tensorboard.OpDef.ArgDef\x12-\n\noutput_arg\x18\x03 \x03(\x0b\x32\x19.tensorboard.OpDef.ArgDef\x12\x16\n\x0e\x63ontrol_output\x18\x14 \x03(\t\x12(\n\x04\x61ttr\x18\x04 \x03(\x0b\x32\x1a.tensorboard.OpDef.AttrDef\x12/\n\x0b\x64\x65precation\x18\x08 \x01(\x0b\x32\x1a.tensorboard.OpDeprecation\x12\x0f\n\x07summary\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\x16\n\x0eis_commutative\x18\x12 \x01(\x08\x12\x14\n\x0cis_aggregate\x18\x10 \x01(\x08\x12\x13\n\x0bis_stateful\x18\x11 \x01(\x08\x12\"\n\x1a\x61llows_uninitialized_input\x18\x13 \x01(\x08\x12$\n\x1cis_distributed_communication\x18\x15 \x01(\x08\x1a\x9f\x02\n\x06\x41rgDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12#\n\x04type\x18\x03 \x01(\x0e\x32\x15.tensorboard.DataType\x12\x11\n\ttype_attr\x18\x04 \x01(\t\x12\x13\n\x0bnumber_attr\x18\x05 \x01(\t\x12\x16\n\x0etype_list_attr\x18\x06 \x01(\t\x12\x43\n\x0bhandle_data\x18\x07 \x03(\x0b\x32..tensorboard.ResourceHandleProto.DtypeAndShape\x12\x0e\n\x06is_ref\x18\x10 \x01(\x08\x12\x38\n\x16\x65xperimental_full_type\x18\x11 \x01(\x0b\x32\x18.tensorboard.FullTypeDef\x1a\xbf\x01\n\x07\x41ttrDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12-\n\rdefault_value\x18\x03 \x01(\x0b\x32\x16.tensorboard.AttrValue\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x13\n\x0bhas_minimum\x18\x05 \x01(\x08\x12\x0f\n\x07minimum\x18\x06 \x01(\x03\x12.\n\x0e\x61llowed_values\x18\x07 \x01(\x0b\x32\x16.tensorboard.AttrValue\"5\n\rOpDeprecation\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x13\n\x0b\x65xplanation\x18\x02 \x01(\t\"(\n\x06OpList\x12\x1e\n\x02op\x18\x01 
\x03(\x0b\x32\x12.tensorboard.OpDefB{\n\x18org.tensorflow.frameworkB\x0bOpDefProtosP\x01ZMgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_OPDEF = DESCRIPTOR.message_types_by_name['OpDef'] +_OPDEF_ARGDEF = _OPDEF.nested_types_by_name['ArgDef'] +_OPDEF_ATTRDEF = _OPDEF.nested_types_by_name['AttrDef'] +_OPDEPRECATION = DESCRIPTOR.message_types_by_name['OpDeprecation'] +_OPLIST = DESCRIPTOR.message_types_by_name['OpList'] +OpDef = _reflection.GeneratedProtocolMessageType('OpDef', (_message.Message,), { + + 'ArgDef' : _reflection.GeneratedProtocolMessageType('ArgDef', (_message.Message,), { + 'DESCRIPTOR' : _OPDEF_ARGDEF, + '__module__' : 'tensorboard.compat.proto.op_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpDef.ArgDef) + }) + , + + 'AttrDef' : _reflection.GeneratedProtocolMessageType('AttrDef', (_message.Message,), { + 'DESCRIPTOR' : _OPDEF_ATTRDEF, + '__module__' : 'tensorboard.compat.proto.op_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpDef.AttrDef) + }) + , + 'DESCRIPTOR' : _OPDEF, + '__module__' : 'tensorboard.compat.proto.op_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpDef) + }) +_sym_db.RegisterMessage(OpDef) +_sym_db.RegisterMessage(OpDef.ArgDef) +_sym_db.RegisterMessage(OpDef.AttrDef) + +OpDeprecation = _reflection.GeneratedProtocolMessageType('OpDeprecation', (_message.Message,), { + 'DESCRIPTOR' : _OPDEPRECATION, + '__module__' : 'tensorboard.compat.proto.op_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpDeprecation) + }) +_sym_db.RegisterMessage(OpDeprecation) + +OpList = _reflection.GeneratedProtocolMessageType('OpList', (_message.Message,), { + 'DESCRIPTOR' : _OPLIST, + '__module__' : 'tensorboard.compat.proto.op_def_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpList) + }) +_sym_db.RegisterMessage(OpList) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\013OpDefProtosP\001ZMgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto\370\001\001' + _OPDEF._serialized_start=226 + _OPDEF._serialized_end=1118 + _OPDEF_ARGDEF._serialized_start=637 + _OPDEF_ARGDEF._serialized_end=924 + _OPDEF_ATTRDEF._serialized_start=927 + _OPDEF_ATTRDEF._serialized_end=1118 + _OPDEPRECATION._serialized_start=1120 + _OPDEPRECATION._serialized_end=1173 + _OPLIST._serialized_start=1175 + _OPLIST._serialized_end=1215 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/saved_object_graph_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/saved_object_graph_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..d487f1c8d2391b85782e299022e904816c355275 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/saved_object_graph_pb2.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/saved_object_graph.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from tensorboard.compat.proto import tensor_shape_pb2 as tensorboard_dot_compat_dot_proto_dot_tensor__shape__pb2 +from tensorboard.compat.proto import types_pb2 as tensorboard_dot_compat_dot_proto_dot_types__pb2 +from tensorboard.compat.proto import variable_pb2 as tensorboard_dot_compat_dot_proto_dot_variable__pb2 +from tensorboard.compat.proto import versions_pb2 as tensorboard_dot_compat_dot_proto_dot_versions__pb2 +from tensorboard.compat.proto import struct_pb2 as tensorboard_dot_compat_dot_proto_dot_struct__pb2 +from tensorboard.compat.proto import trackable_object_graph_pb2 as tensorboard_dot_compat_dot_proto_dot_trackable__object__graph__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n1tensorboard/compat/proto/saved_object_graph.proto\x12\x0btensorboard\x1a\x19google/protobuf/any.proto\x1a+tensorboard/compat/proto/tensor_shape.proto\x1a$tensorboard/compat/proto/types.proto\x1a\'tensorboard/compat/proto/variable.proto\x1a\'tensorboard/compat/proto/versions.proto\x1a%tensorboard/compat/proto/struct.proto\x1a\x35tensorboard/compat/proto/trackable_object_graph.proto\"\xeb\x01\n\x10SavedObjectGraph\x12\'\n\x05nodes\x18\x01 \x03(\x0b\x32\x18.tensorboard.SavedObject\x12P\n\x12\x63oncrete_functions\x18\x02 \x03(\x0b\x32\x34.tensorboard.SavedObjectGraph.ConcreteFunctionsEntry\x1a\\\n\x16\x43oncreteFunctionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".tensorboard.SavedConcreteFunction:\x02\x38\x01\"\xdd\x07\n\x0bSavedObject\x12S\n\x08\x63hildren\x18\x01 \x03(\x0b\x32\x41.tensorboard.TrackableObjectGraph.TrackableObject.ObjectReference\x12W\n\x0c\x64\x65pendencies\x18\x0f \x03(\x0b\x32\x41.tensorboard.TrackableObjectGraph.TrackableObject.ObjectReference\x12_\n\x0eslot_variables\x18\x03 \x03(\x0b\x32G.tensorboard.TrackableObjectGraph.TrackableObject.SlotVariableReference\x12\x33\n\x0buser_object\x18\x04 \x01(\x0b\x32\x1c.tensorboard.SavedUserObjectH\x00\x12(\n\x05\x61sset\x18\x05 \x01(\x0b\x32\x17.tensorboard.SavedAssetH\x00\x12.\n\x08\x66unction\x18\x06 \x01(\x0b\x32\x1a.tensorboard.SavedFunctionH\x00\x12.\n\x08variable\x18\x07 \x01(\x0b\x32\x1a.tensorboard.SavedVariableH\x00\x12H\n\x16\x62\x61re_concrete_function\x18\x08 \x01(\x0b\x32&.tensorboard.SavedBareConcreteFunctionH\x00\x12.\n\x08\x63onstant\x18\t \x01(\x0b\x32\x1a.tensorboard.SavedConstantH\x00\x12.\n\x08resource\x18\n \x01(\x0b\x32\x1a.tensorboard.SavedResourceH\x00\x12\x36\n\x0f\x63\x61ptured_tensor\x18\x0c \x01(\x0b\x32\x1b.tensorboard.CapturedTensorH\x00\x12G\n\x10saveable_objects\x18\x0b \x03(\x0b\x32-.tensorboard.SavedObject.SaveableObjectsEntry\x12\x17\n\x0fregistered_name\x18\r \x01(\t\x12\x33\n\x15serialized_user_proto\x18\x0e \x01(\x0b\x32\x14.google.protobuf.Any\x12\x18\n\x10registered_saver\x18\x10 \x01(\t\x1aS\n\x14SaveableObjectsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 
\x01(\x0b\x32\x1b.tensorboard.SaveableObject:\x02\x38\x01\x42\x06\n\x04kindJ\x04\x08\x02\x10\x03R\nattributes\"e\n\x0fSavedUserObject\x12\x12\n\nidentifier\x18\x01 \x01(\t\x12(\n\x07version\x18\x02 \x01(\x0b\x32\x17.tensorboard.VersionDef\x12\x14\n\x08metadata\x18\x03 \x01(\tB\x02\x18\x01\"*\n\nSavedAsset\x12\x1c\n\x14\x61sset_file_def_index\x18\x01 \x01(\x05\"]\n\rSavedFunction\x12\x1a\n\x12\x63oncrete_functions\x18\x01 \x03(\t\x12\x30\n\rfunction_spec\x18\x02 \x01(\x0b\x32\x19.tensorboard.FunctionSpec\"9\n\x0e\x43\x61pturedTensor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63oncrete_function\x18\x02 \x01(\t\"\xaa\x01\n\x15SavedConcreteFunction\x12\x14\n\x0c\x62ound_inputs\x18\x02 \x03(\x05\x12\x43\n\x1d\x63\x61nonicalized_input_signature\x18\x03 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\x12\x36\n\x10output_signature\x18\x04 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\"\xae\x01\n\x19SavedBareConcreteFunction\x12\x1e\n\x16\x63oncrete_function_name\x18\x01 \x01(\t\x12\x19\n\x11\x61rgument_keywords\x18\x02 \x03(\t\x12$\n\x1c\x61llowed_positional_arguments\x18\x03 \x01(\x03\x12\x30\n\rfunction_spec\x18\x04 \x01(\x0b\x32\x19.tensorboard.FunctionSpec\"\"\n\rSavedConstant\x12\x11\n\toperation\x18\x01 \x01(\t\"\xdc\x02\n\rSavedVariable\x12$\n\x05\x64type\x18\x01 \x01(\x0e\x32\x15.tensorboard.DataType\x12,\n\x05shape\x18\x02 \x01(\x0b\x32\x1d.tensorboard.TensorShapeProto\x12\x11\n\ttrainable\x18\x03 \x01(\x08\x12=\n\x0fsynchronization\x18\x04 \x01(\x0e\x32$.tensorboard.VariableSynchronization\x12\x35\n\x0b\x61ggregation\x18\x05 \x01(\x0e\x32 .tensorboard.VariableAggregation\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x07 \x01(\t\x12P\n,experimental_distributed_variable_components\x18\x08 \x03(\x0b\x32\x1a.tensorboard.SavedVariable\"\xfe\x01\n\x0c\x46unctionSpec\x12\x31\n\x0b\x66ullargspec\x18\x01 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\x12\x11\n\tis_method\x18\x02 \x01(\x08\x12\x35\n\x0finput_signature\x18\x05 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\x12\x39\n\x0bjit_compile\x18\x06 \x01(\x0e\x32$.tensorboard.FunctionSpec.JitCompile\"*\n\nJitCompile\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x06\n\x02ON\x10\x01\x12\x07\n\x03OFF\x10\x02J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05\"\x1f\n\rSavedResource\x12\x0e\n\x06\x64\x65vice\x18\x01 \x01(\t\"A\n\x0eSaveableObject\x12\x15\n\rsave_function\x18\x02 \x01(\x05\x12\x18\n\x10restore_function\x18\x03 \x01(\x05\x42ZZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_SAVEDOBJECTGRAPH = DESCRIPTOR.message_types_by_name['SavedObjectGraph'] +_SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY = _SAVEDOBJECTGRAPH.nested_types_by_name['ConcreteFunctionsEntry'] +_SAVEDOBJECT = DESCRIPTOR.message_types_by_name['SavedObject'] +_SAVEDOBJECT_SAVEABLEOBJECTSENTRY = _SAVEDOBJECT.nested_types_by_name['SaveableObjectsEntry'] +_SAVEDUSEROBJECT = DESCRIPTOR.message_types_by_name['SavedUserObject'] +_SAVEDASSET = DESCRIPTOR.message_types_by_name['SavedAsset'] +_SAVEDFUNCTION = DESCRIPTOR.message_types_by_name['SavedFunction'] +_CAPTUREDTENSOR = DESCRIPTOR.message_types_by_name['CapturedTensor'] +_SAVEDCONCRETEFUNCTION = DESCRIPTOR.message_types_by_name['SavedConcreteFunction'] +_SAVEDBARECONCRETEFUNCTION = DESCRIPTOR.message_types_by_name['SavedBareConcreteFunction'] +_SAVEDCONSTANT = DESCRIPTOR.message_types_by_name['SavedConstant'] +_SAVEDVARIABLE = DESCRIPTOR.message_types_by_name['SavedVariable'] +_FUNCTIONSPEC = DESCRIPTOR.message_types_by_name['FunctionSpec'] 
+_SAVEDRESOURCE = DESCRIPTOR.message_types_by_name['SavedResource'] +_SAVEABLEOBJECT = DESCRIPTOR.message_types_by_name['SaveableObject'] +_FUNCTIONSPEC_JITCOMPILE = _FUNCTIONSPEC.enum_types_by_name['JitCompile'] +SavedObjectGraph = _reflection.GeneratedProtocolMessageType('SavedObjectGraph', (_message.Message,), { + + 'ConcreteFunctionsEntry' : _reflection.GeneratedProtocolMessageType('ConcreteFunctionsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedObjectGraph.ConcreteFunctionsEntry) + }) + , + 'DESCRIPTOR' : _SAVEDOBJECTGRAPH, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedObjectGraph) + }) +_sym_db.RegisterMessage(SavedObjectGraph) +_sym_db.RegisterMessage(SavedObjectGraph.ConcreteFunctionsEntry) + +SavedObject = _reflection.GeneratedProtocolMessageType('SavedObject', (_message.Message,), { + + 'SaveableObjectsEntry' : _reflection.GeneratedProtocolMessageType('SaveableObjectsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDOBJECT_SAVEABLEOBJECTSENTRY, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedObject.SaveableObjectsEntry) + }) + , + 'DESCRIPTOR' : _SAVEDOBJECT, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedObject) + }) +_sym_db.RegisterMessage(SavedObject) +_sym_db.RegisterMessage(SavedObject.SaveableObjectsEntry) + +SavedUserObject = _reflection.GeneratedProtocolMessageType('SavedUserObject', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDUSEROBJECT, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedUserObject) + }) +_sym_db.RegisterMessage(SavedUserObject) + +SavedAsset = _reflection.GeneratedProtocolMessageType('SavedAsset', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDASSET, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedAsset) + }) +_sym_db.RegisterMessage(SavedAsset) + +SavedFunction = _reflection.GeneratedProtocolMessageType('SavedFunction', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDFUNCTION, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedFunction) + }) +_sym_db.RegisterMessage(SavedFunction) + +CapturedTensor = _reflection.GeneratedProtocolMessageType('CapturedTensor', (_message.Message,), { + 'DESCRIPTOR' : _CAPTUREDTENSOR, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CapturedTensor) + }) +_sym_db.RegisterMessage(CapturedTensor) + +SavedConcreteFunction = _reflection.GeneratedProtocolMessageType('SavedConcreteFunction', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDCONCRETEFUNCTION, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedConcreteFunction) + }) +_sym_db.RegisterMessage(SavedConcreteFunction) + +SavedBareConcreteFunction = _reflection.GeneratedProtocolMessageType('SavedBareConcreteFunction', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDBARECONCRETEFUNCTION, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # 
@@protoc_insertion_point(class_scope:tensorboard.SavedBareConcreteFunction) + }) +_sym_db.RegisterMessage(SavedBareConcreteFunction) + +SavedConstant = _reflection.GeneratedProtocolMessageType('SavedConstant', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDCONSTANT, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedConstant) + }) +_sym_db.RegisterMessage(SavedConstant) + +SavedVariable = _reflection.GeneratedProtocolMessageType('SavedVariable', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDVARIABLE, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedVariable) + }) +_sym_db.RegisterMessage(SavedVariable) + +FunctionSpec = _reflection.GeneratedProtocolMessageType('FunctionSpec', (_message.Message,), { + 'DESCRIPTOR' : _FUNCTIONSPEC, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.FunctionSpec) + }) +_sym_db.RegisterMessage(FunctionSpec) + +SavedResource = _reflection.GeneratedProtocolMessageType('SavedResource', (_message.Message,), { + 'DESCRIPTOR' : _SAVEDRESOURCE, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SavedResource) + }) +_sym_db.RegisterMessage(SavedResource) + +SaveableObject = _reflection.GeneratedProtocolMessageType('SaveableObject', (_message.Message,), { + 'DESCRIPTOR' : _SAVEABLEOBJECT, + '__module__' : 'tensorboard.compat.proto.saved_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.SaveableObject) + }) +_sym_db.RegisterMessage(SaveableObject) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY._options = None + _SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY._serialized_options = b'8\001' + _SAVEDOBJECT_SAVEABLEOBJECTSENTRY._options = None + _SAVEDOBJECT_SAVEABLEOBJECTSENTRY._serialized_options = b'8\001' + _SAVEDUSEROBJECT.fields_by_name['metadata']._options = None + _SAVEDUSEROBJECT.fields_by_name['metadata']._serialized_options = b'\030\001' + _SAVEDOBJECTGRAPH._serialized_start=353 + _SAVEDOBJECTGRAPH._serialized_end=588 + _SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY._serialized_start=496 + _SAVEDOBJECTGRAPH_CONCRETEFUNCTIONSENTRY._serialized_end=588 + _SAVEDOBJECT._serialized_start=591 + _SAVEDOBJECT._serialized_end=1580 + _SAVEDOBJECT_SAVEABLEOBJECTSENTRY._serialized_start=1471 + _SAVEDOBJECT_SAVEABLEOBJECTSENTRY._serialized_end=1554 + _SAVEDUSEROBJECT._serialized_start=1582 + _SAVEDUSEROBJECT._serialized_end=1683 + _SAVEDASSET._serialized_start=1685 + _SAVEDASSET._serialized_end=1727 + _SAVEDFUNCTION._serialized_start=1729 + _SAVEDFUNCTION._serialized_end=1822 + _CAPTUREDTENSOR._serialized_start=1824 + _CAPTUREDTENSOR._serialized_end=1881 + _SAVEDCONCRETEFUNCTION._serialized_start=1884 + _SAVEDCONCRETEFUNCTION._serialized_end=2054 + _SAVEDBARECONCRETEFUNCTION._serialized_start=2057 + _SAVEDBARECONCRETEFUNCTION._serialized_end=2231 + _SAVEDCONSTANT._serialized_start=2233 + _SAVEDCONSTANT._serialized_end=2267 + _SAVEDVARIABLE._serialized_start=2270 + _SAVEDVARIABLE._serialized_end=2618 + _FUNCTIONSPEC._serialized_start=2621 + _FUNCTIONSPEC._serialized_end=2875 + _FUNCTIONSPEC_JITCOMPILE._serialized_start=2821 + _FUNCTIONSPEC_JITCOMPILE._serialized_end=2863 + 
_SAVEDRESOURCE._serialized_start=2877 + _SAVEDRESOURCE._serialized_end=2908 + _SAVEABLEOBJECT._serialized_start=2910 + _SAVEABLEOBJECT._serialized_end=2975 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/step_stats_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/step_stats_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..aac84b1edd59b09e05e1f3289cc9b6c672f865a7 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/step_stats_pb2.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorboard/compat/proto/step_stats.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import allocation_description_pb2 as tensorboard_dot_compat_dot_proto_dot_allocation__description__pb2 +from tensorboard.compat.proto import tensor_description_pb2 as tensorboard_dot_compat_dot_proto_dot_tensor__description__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorboard/compat/proto/step_stats.proto\x12\x0btensorboard\x1a\x35tensorboard/compat/proto/allocation_description.proto\x1a\x31tensorboard/compat/proto/tensor_description.proto\"=\n\x10\x41llocationRecord\x12\x14\n\x0c\x61lloc_micros\x18\x01 \x01(\x03\x12\x13\n\x0b\x61lloc_bytes\x18\x02 \x01(\x03\"\xc5\x01\n\x13\x41llocatorMemoryUsed\x12\x16\n\x0e\x61llocator_name\x18\x01 \x01(\t\x12\x13\n\x0btotal_bytes\x18\x02 \x01(\x03\x12\x12\n\npeak_bytes\x18\x03 \x01(\x03\x12\x12\n\nlive_bytes\x18\x04 \x01(\x03\x12\x39\n\x12\x61llocation_records\x18\x06 \x03(\x0b\x32\x1d.tensorboard.AllocationRecord\x12\x1e\n\x16\x61llocator_bytes_in_use\x18\x05 \x01(\x03\"V\n\nNodeOutput\x12\x0c\n\x04slot\x18\x01 \x01(\x05\x12:\n\x12tensor_description\x18\x03 \x01(\x0b\x32\x1e.tensorboard.TensorDescription\"\xec\x01\n\x0bMemoryStats\x12\x18\n\x10temp_memory_size\x18\x01 \x01(\x03\x12\x1e\n\x16persistent_memory_size\x18\x03 \x01(\x03\x12#\n\x1bpersistent_tensor_alloc_ids\x18\x05 \x03(\x03\x12#\n\x17\x64\x65vice_temp_memory_size\x18\x02 \x01(\x03\x42\x02\x18\x01\x12)\n\x1d\x64\x65vice_persistent_memory_size\x18\x04 \x01(\x03\x42\x02\x18\x01\x12.\n\"device_persistent_tensor_alloc_ids\x18\x06 \x03(\x03\x42\x02\x18\x01\"\xa2\x04\n\rNodeExecStats\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x18\n\x10\x61ll_start_micros\x18\x02 \x01(\x03\x12\x1b\n\x13op_start_rel_micros\x18\x03 \x01(\x03\x12\x19\n\x11op_end_rel_micros\x18\x04 \x01(\x03\x12\x1a\n\x12\x61ll_end_rel_micros\x18\x05 \x01(\x03\x12\x30\n\x06memory\x18\x06 \x03(\x0b\x32 .tensorboard.AllocatorMemoryUsed\x12\'\n\x06output\x18\x07 \x03(\x0b\x32\x17.tensorboard.NodeOutput\x12\x16\n\x0etimeline_label\x18\x08 \x01(\t\x12\x18\n\x10scheduled_micros\x18\t \x01(\x03\x12\x11\n\tthread_id\x18\n \x01(\r\x12=\n\x11referenced_tensor\x18\x0b \x03(\x0b\x32\".tensorboard.AllocationDescription\x12.\n\x0cmemory_stats\x18\x0c \x01(\x0b\x32\x18.tensorboard.MemoryStats\x12\x17\n\x0f\x61ll_start_nanos\x18\r \x01(\x03\x12\x1a\n\x12op_start_rel_nanos\x18\x0e \x01(\x03\x12\x18\n\x10op_end_rel_nanos\x18\x0f 
\x01(\x03\x12\x19\n\x11\x61ll_end_rel_nanos\x18\x10 \x01(\x03\x12\x17\n\x0fscheduled_nanos\x18\x11 \x01(\x03\"\xca\x01\n\x0f\x44\x65viceStepStats\x12\x0e\n\x06\x64\x65vice\x18\x01 \x01(\t\x12.\n\nnode_stats\x18\x02 \x03(\x0b\x32\x1a.tensorboard.NodeExecStats\x12\x43\n\x0cthread_names\x18\x03 \x03(\x0b\x32-.tensorboard.DeviceStepStats.ThreadNamesEntry\x1a\x32\n\x10ThreadNamesEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"<\n\tStepStats\x12/\n\tdev_stats\x18\x01 \x03(\x0b\x32\x1c.tensorboard.DeviceStepStatsB\x83\x01\n\x18org.tensorflow.frameworkB\x0fStepStatsProtosP\x01ZQgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/step_stats_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_ALLOCATIONRECORD = DESCRIPTOR.message_types_by_name['AllocationRecord'] +_ALLOCATORMEMORYUSED = DESCRIPTOR.message_types_by_name['AllocatorMemoryUsed'] +_NODEOUTPUT = DESCRIPTOR.message_types_by_name['NodeOutput'] +_MEMORYSTATS = DESCRIPTOR.message_types_by_name['MemoryStats'] +_NODEEXECSTATS = DESCRIPTOR.message_types_by_name['NodeExecStats'] +_DEVICESTEPSTATS = DESCRIPTOR.message_types_by_name['DeviceStepStats'] +_DEVICESTEPSTATS_THREADNAMESENTRY = _DEVICESTEPSTATS.nested_types_by_name['ThreadNamesEntry'] +_STEPSTATS = DESCRIPTOR.message_types_by_name['StepStats'] +AllocationRecord = _reflection.GeneratedProtocolMessageType('AllocationRecord', (_message.Message,), { + 'DESCRIPTOR' : _ALLOCATIONRECORD, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.AllocationRecord) + }) +_sym_db.RegisterMessage(AllocationRecord) + +AllocatorMemoryUsed = _reflection.GeneratedProtocolMessageType('AllocatorMemoryUsed', (_message.Message,), { + 'DESCRIPTOR' : _ALLOCATORMEMORYUSED, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.AllocatorMemoryUsed) + }) +_sym_db.RegisterMessage(AllocatorMemoryUsed) + +NodeOutput = _reflection.GeneratedProtocolMessageType('NodeOutput', (_message.Message,), { + 'DESCRIPTOR' : _NODEOUTPUT, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NodeOutput) + }) +_sym_db.RegisterMessage(NodeOutput) + +MemoryStats = _reflection.GeneratedProtocolMessageType('MemoryStats', (_message.Message,), { + 'DESCRIPTOR' : _MEMORYSTATS, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.MemoryStats) + }) +_sym_db.RegisterMessage(MemoryStats) + +NodeExecStats = _reflection.GeneratedProtocolMessageType('NodeExecStats', (_message.Message,), { + 'DESCRIPTOR' : _NODEEXECSTATS, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NodeExecStats) + }) +_sym_db.RegisterMessage(NodeExecStats) + +DeviceStepStats = _reflection.GeneratedProtocolMessageType('DeviceStepStats', (_message.Message,), { + + 'ThreadNamesEntry' : _reflection.GeneratedProtocolMessageType('ThreadNamesEntry', (_message.Message,), { + 'DESCRIPTOR' : _DEVICESTEPSTATS_THREADNAMESENTRY, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DeviceStepStats.ThreadNamesEntry) + }) + , + 'DESCRIPTOR' : _DEVICESTEPSTATS, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DeviceStepStats) + }) +_sym_db.RegisterMessage(DeviceStepStats) +_sym_db.RegisterMessage(DeviceStepStats.ThreadNamesEntry) + +StepStats = 
_reflection.GeneratedProtocolMessageType('StepStats', (_message.Message,), { + 'DESCRIPTOR' : _STEPSTATS, + '__module__' : 'tensorboard.compat.proto.step_stats_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.StepStats) + }) +_sym_db.RegisterMessage(StepStats) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\017StepStatsProtosP\001ZQgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/step_stats_go_proto\370\001\001' + _MEMORYSTATS.fields_by_name['device_temp_memory_size']._options = None + _MEMORYSTATS.fields_by_name['device_temp_memory_size']._serialized_options = b'\030\001' + _MEMORYSTATS.fields_by_name['device_persistent_memory_size']._options = None + _MEMORYSTATS.fields_by_name['device_persistent_memory_size']._serialized_options = b'\030\001' + _MEMORYSTATS.fields_by_name['device_persistent_tensor_alloc_ids']._options = None + _MEMORYSTATS.fields_by_name['device_persistent_tensor_alloc_ids']._serialized_options = b'\030\001' + _DEVICESTEPSTATS_THREADNAMESENTRY._options = None + _DEVICESTEPSTATS_THREADNAMESENTRY._serialized_options = b'8\001' + _ALLOCATIONRECORD._serialized_start=164 + _ALLOCATIONRECORD._serialized_end=225 + _ALLOCATORMEMORYUSED._serialized_start=228 + _ALLOCATORMEMORYUSED._serialized_end=425 + _NODEOUTPUT._serialized_start=427 + _NODEOUTPUT._serialized_end=513 + _MEMORYSTATS._serialized_start=516 + _MEMORYSTATS._serialized_end=752 + _NODEEXECSTATS._serialized_start=755 + _NODEEXECSTATS._serialized_end=1301 + _DEVICESTEPSTATS._serialized_start=1304 + _DEVICESTEPSTATS._serialized_end=1506 + _DEVICESTEPSTATS_THREADNAMESENTRY._serialized_start=1456 + _DEVICESTEPSTATS_THREADNAMESENTRY._serialized_end=1506 + _STEPSTATS._serialized_start=1508 + _STEPSTATS._serialized_end=1568 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/struct_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/struct_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..abef4b3e2c538c26f1f8d66301cc5d892f1dd5a1 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/struct_pb2.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/struct.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import tensor_pb2 as tensorboard_dot_compat_dot_proto_dot_tensor__pb2 +from tensorboard.compat.proto import tensor_shape_pb2 as tensorboard_dot_compat_dot_proto_dot_tensor__shape__pb2 +from tensorboard.compat.proto import types_pb2 as tensorboard_dot_compat_dot_proto_dot_types__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%tensorboard/compat/proto/struct.proto\x12\x0btensorboard\x1a%tensorboard/compat/proto/tensor.proto\x1a+tensorboard/compat/proto/tensor_shape.proto\x1a$tensorboard/compat/proto/types.proto\"\xfd\x05\n\x0fStructuredValue\x12,\n\nnone_value\x18\x01 \x01(\x0b\x32\x16.tensorboard.NoneValueH\x00\x12\x17\n\rfloat64_value\x18\x0b \x01(\x01H\x00\x12\x15\n\x0bint64_value\x18\x0c \x01(\x12H\x00\x12\x16\n\x0cstring_value\x18\r \x01(\tH\x00\x12\x14\n\nbool_value\x18\x0e \x01(\x08H\x00\x12;\n\x12tensor_shape_value\x18\x1f \x01(\x0b\x32\x1d.tensorboard.TensorShapeProtoH\x00\x12\x33\n\x12tensor_dtype_value\x18 \x01(\x0e\x32\x15.tensorboard.DataTypeH\x00\x12\x39\n\x11tensor_spec_value\x18! \x01(\x0b\x32\x1c.tensorboard.TensorSpecProtoH\x00\x12\x35\n\x0ftype_spec_value\x18\" \x01(\x0b\x32\x1a.tensorboard.TypeSpecProtoH\x00\x12H\n\x19\x62ounded_tensor_spec_value\x18# \x01(\x0b\x32#.tensorboard.BoundedTensorSpecProtoH\x00\x12,\n\nlist_value\x18\x33 \x01(\x0b\x32\x16.tensorboard.ListValueH\x00\x12.\n\x0btuple_value\x18\x34 \x01(\x0b\x32\x17.tensorboard.TupleValueH\x00\x12,\n\ndict_value\x18\x35 \x01(\x0b\x32\x16.tensorboard.DictValueH\x00\x12\x39\n\x11named_tuple_value\x18\x36 \x01(\x0b\x32\x1c.tensorboard.NamedTupleValueH\x00\x12\x30\n\x0ctensor_value\x18\x37 \x01(\x0b\x32\x18.tensorboard.TensorProtoH\x00\x12/\n\x0bnumpy_value\x18\x38 \x01(\x0b\x32\x18.tensorboard.TensorProtoH\x00\x42\x06\n\x04kind\"\x0b\n\tNoneValue\"9\n\tListValue\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.tensorboard.StructuredValue\":\n\nTupleValue\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.tensorboard.StructuredValue\"\x8c\x01\n\tDictValue\x12\x32\n\x06\x66ields\x18\x01 \x03(\x0b\x32\".tensorboard.DictValue.FieldsEntry\x1aK\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.tensorboard.StructuredValue:\x02\x38\x01\"E\n\tPairValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\"G\n\x0fNamedTupleValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06values\x18\x02 \x03(\x0b\x32\x16.tensorboard.PairValue\"s\n\x0fTensorSpecProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x05shape\x18\x02 \x01(\x0b\x32\x1d.tensorboard.TensorShapeProto\x12$\n\x05\x64type\x18\x03 \x01(\x0e\x32\x15.tensorboard.DataType\"\xd0\x01\n\x16\x42oundedTensorSpecProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x05shape\x18\x02 \x01(\x0b\x32\x1d.tensorboard.TensorShapeProto\x12$\n\x05\x64type\x18\x03 \x01(\x0e\x32\x15.tensorboard.DataType\x12)\n\x07minimum\x18\x04 \x01(\x0b\x32\x18.tensorboard.TensorProto\x12)\n\x07maximum\x18\x05 \x01(\x0b\x32\x18.tensorboard.TensorProto\"\xfa\x03\n\rTypeSpecProto\x12\x41\n\x0ftype_spec_class\x18\x01 
\x01(\x0e\x32(.tensorboard.TypeSpecProto.TypeSpecClass\x12\x30\n\ntype_state\x18\x02 \x01(\x0b\x32\x1c.tensorboard.StructuredValue\x12\x1c\n\x14type_spec_class_name\x18\x03 \x01(\t\x12\x1b\n\x13num_flat_components\x18\x04 \x01(\x05\"\xb8\x02\n\rTypeSpecClass\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12SPARSE_TENSOR_SPEC\x10\x01\x12\x17\n\x13INDEXED_SLICES_SPEC\x10\x02\x12\x16\n\x12RAGGED_TENSOR_SPEC\x10\x03\x12\x15\n\x11TENSOR_ARRAY_SPEC\x10\x04\x12\x15\n\x11\x44\x41TA_DATASET_SPEC\x10\x05\x12\x16\n\x12\x44\x41TA_ITERATOR_SPEC\x10\x06\x12\x11\n\rOPTIONAL_SPEC\x10\x07\x12\x14\n\x10PER_REPLICA_SPEC\x10\x08\x12\x11\n\rVARIABLE_SPEC\x10\t\x12\x16\n\x12ROW_PARTITION_SPEC\x10\n\x12\x18\n\x14REGISTERED_TYPE_SPEC\x10\x0c\x12\x17\n\x13\x45XTENSION_TYPE_SPEC\x10\r\"\x04\x08\x0b\x10\x0b\x42WZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_protob\x06proto3') + + + +_STRUCTUREDVALUE = DESCRIPTOR.message_types_by_name['StructuredValue'] +_NONEVALUE = DESCRIPTOR.message_types_by_name['NoneValue'] +_LISTVALUE = DESCRIPTOR.message_types_by_name['ListValue'] +_TUPLEVALUE = DESCRIPTOR.message_types_by_name['TupleValue'] +_DICTVALUE = DESCRIPTOR.message_types_by_name['DictValue'] +_DICTVALUE_FIELDSENTRY = _DICTVALUE.nested_types_by_name['FieldsEntry'] +_PAIRVALUE = DESCRIPTOR.message_types_by_name['PairValue'] +_NAMEDTUPLEVALUE = DESCRIPTOR.message_types_by_name['NamedTupleValue'] +_TENSORSPECPROTO = DESCRIPTOR.message_types_by_name['TensorSpecProto'] +_BOUNDEDTENSORSPECPROTO = DESCRIPTOR.message_types_by_name['BoundedTensorSpecProto'] +_TYPESPECPROTO = DESCRIPTOR.message_types_by_name['TypeSpecProto'] +_TYPESPECPROTO_TYPESPECCLASS = _TYPESPECPROTO.enum_types_by_name['TypeSpecClass'] +StructuredValue = _reflection.GeneratedProtocolMessageType('StructuredValue', (_message.Message,), { + 'DESCRIPTOR' : _STRUCTUREDVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.StructuredValue) + }) +_sym_db.RegisterMessage(StructuredValue) + +NoneValue = _reflection.GeneratedProtocolMessageType('NoneValue', (_message.Message,), { + 'DESCRIPTOR' : _NONEVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NoneValue) + }) +_sym_db.RegisterMessage(NoneValue) + +ListValue = _reflection.GeneratedProtocolMessageType('ListValue', (_message.Message,), { + 'DESCRIPTOR' : _LISTVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ListValue) + }) +_sym_db.RegisterMessage(ListValue) + +TupleValue = _reflection.GeneratedProtocolMessageType('TupleValue', (_message.Message,), { + 'DESCRIPTOR' : _TUPLEVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TupleValue) + }) +_sym_db.RegisterMessage(TupleValue) + +DictValue = _reflection.GeneratedProtocolMessageType('DictValue', (_message.Message,), { + + 'FieldsEntry' : _reflection.GeneratedProtocolMessageType('FieldsEntry', (_message.Message,), { + 'DESCRIPTOR' : _DICTVALUE_FIELDSENTRY, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DictValue.FieldsEntry) + }) + , + 'DESCRIPTOR' : _DICTVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.DictValue) + }) +_sym_db.RegisterMessage(DictValue) +_sym_db.RegisterMessage(DictValue.FieldsEntry) + +PairValue = 
_reflection.GeneratedProtocolMessageType('PairValue', (_message.Message,), { + 'DESCRIPTOR' : _PAIRVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.PairValue) + }) +_sym_db.RegisterMessage(PairValue) + +NamedTupleValue = _reflection.GeneratedProtocolMessageType('NamedTupleValue', (_message.Message,), { + 'DESCRIPTOR' : _NAMEDTUPLEVALUE, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.NamedTupleValue) + }) +_sym_db.RegisterMessage(NamedTupleValue) + +TensorSpecProto = _reflection.GeneratedProtocolMessageType('TensorSpecProto', (_message.Message,), { + 'DESCRIPTOR' : _TENSORSPECPROTO, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TensorSpecProto) + }) +_sym_db.RegisterMessage(TensorSpecProto) + +BoundedTensorSpecProto = _reflection.GeneratedProtocolMessageType('BoundedTensorSpecProto', (_message.Message,), { + 'DESCRIPTOR' : _BOUNDEDTENSORSPECPROTO, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.BoundedTensorSpecProto) + }) +_sym_db.RegisterMessage(BoundedTensorSpecProto) + +TypeSpecProto = _reflection.GeneratedProtocolMessageType('TypeSpecProto', (_message.Message,), { + 'DESCRIPTOR' : _TYPESPECPROTO, + '__module__' : 'tensorboard.compat.proto.struct_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TypeSpecProto) + }) +_sym_db.RegisterMessage(TypeSpecProto) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto' + _DICTVALUE_FIELDSENTRY._options = None + _DICTVALUE_FIELDSENTRY._serialized_options = b'8\001' + _STRUCTUREDVALUE._serialized_start=177 + _STRUCTUREDVALUE._serialized_end=942 + _NONEVALUE._serialized_start=944 + _NONEVALUE._serialized_end=955 + _LISTVALUE._serialized_start=957 + _LISTVALUE._serialized_end=1014 + _TUPLEVALUE._serialized_start=1016 + _TUPLEVALUE._serialized_end=1074 + _DICTVALUE._serialized_start=1077 + _DICTVALUE._serialized_end=1217 + _DICTVALUE_FIELDSENTRY._serialized_start=1142 + _DICTVALUE_FIELDSENTRY._serialized_end=1217 + _PAIRVALUE._serialized_start=1219 + _PAIRVALUE._serialized_end=1288 + _NAMEDTUPLEVALUE._serialized_start=1290 + _NAMEDTUPLEVALUE._serialized_end=1361 + _TENSORSPECPROTO._serialized_start=1363 + _TENSORSPECPROTO._serialized_end=1478 + _BOUNDEDTENSORSPECPROTO._serialized_start=1481 + _BOUNDEDTENSORSPECPROTO._serialized_end=1689 + _TYPESPECPROTO._serialized_start=1692 + _TYPESPECPROTO._serialized_end=2198 + _TYPESPECPROTO_TYPESPECCLASS._serialized_start=1886 + _TYPESPECPROTO_TYPESPECCLASS._serialized_end=2198 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/tfprof_log_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/tfprof_log_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..adcc736cfd36d1372177d18e5565205445a385f5 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/tfprof_log_pb2.py @@ -0,0 +1,313 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/tfprof_log.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorboard.compat.proto import attr_value_pb2 as tensorboard_dot_compat_dot_proto_dot_attr__value__pb2 +from tensorboard.compat.proto import step_stats_pb2 as tensorboard_dot_compat_dot_proto_dot_step__stats__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorboard/compat/proto/tfprof_log.proto\x12\x0btensorboard\x1a)tensorboard/compat/proto/attr_value.proto\x1a)tensorboard/compat/proto/step_stats.proto\"\xd9\x01\n\x07\x43odeDef\x12*\n\x06traces\x18\x01 \x03(\x0b\x32\x1a.tensorboard.CodeDef.Trace\x1a\xa1\x01\n\x05Trace\x12\x10\n\x04\x66ile\x18\x01 \x01(\tB\x02\x18\x01\x12\x0f\n\x07\x66ile_id\x18\x06 \x01(\x03\x12\x0e\n\x06lineno\x18\x02 \x01(\x05\x12\x14\n\x08\x66unction\x18\x03 \x01(\tB\x02\x18\x01\x12\x13\n\x0b\x66unction_id\x18\x07 \x01(\x03\x12\x10\n\x04line\x18\x04 \x01(\tB\x02\x18\x01\x12\x0f\n\x07line_id\x18\x08 \x01(\x03\x12\x17\n\x0f\x66unc_start_line\x18\x05 \x01(\x05\"d\n\nOpLogEntry\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tfloat_ops\x18\x02 \x01(\x03\x12\r\n\x05types\x18\x03 \x03(\t\x12&\n\x08\x63ode_def\x18\x04 \x01(\x0b\x32\x14.tensorboard.CodeDef\"\xac\x01\n\nOpLogProto\x12,\n\x0blog_entries\x18\x01 \x03(\x0b\x32\x17.tensorboard.OpLogEntry\x12=\n\x0cid_to_string\x18\x02 \x03(\x0b\x32\'.tensorboard.OpLogProto.IdToStringEntry\x1a\x31\n\x0fIdToStringEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc2\x02\n\x0cProfileProto\x12\x33\n\x05nodes\x18\x01 \x03(\x0b\x32$.tensorboard.ProfileProto.NodesEntry\x12\x11\n\thas_trace\x18\x02 \x01(\x08\x12\x1f\n\x17miss_accelerator_stream\x18\x05 \x01(\x08\x12\r\n\x05steps\x18\x03 \x03(\x03\x12?\n\x0cid_to_string\x18\x04 \x03(\x0b\x32).tensorboard.ProfileProto.IdToStringEntry\x1a\x46\n\nNodesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.tensorboard.ProfileNode:\x02\x38\x01\x1a\x31\n\x0fIdToStringEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x92\x08\n\x0bProfileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02op\x18\t \x01(\t\x12\n\n\x02id\x18\r \x01(\x03\x12\x34\n\x06inputs\x18\x02 \x03(\x0b\x32$.tensorboard.ProfileNode.InputsEntry\x12?\n\x0cinput_shapes\x18\x10 \x03(\x0b\x32).tensorboard.ProfileNode.InputShapesEntry\x12\x36\n\x07outputs\x18\x03 \x03(\x0b\x32%.tensorboard.ProfileNode.OutputsEntry\x12\x41\n\routput_shapes\x18\x0f \x03(\x0b\x32*.tensorboard.ProfileNode.OutputShapesEntry\x12\x46\n\x10src_output_index\x18\x0e \x03(\x0b\x32,.tensorboard.ProfileNode.SrcOutputIndexEntry\x12\r\n\x05shape\x18\x04 \x03(\x03\x12\x10\n\x08op_types\x18\x05 \x03(\t\x12\x18\n\x10\x63\x61nonical_device\x18\x06 \x01(\t\x12\x13\n\x0bhost_device\x18\x07 \x01(\t\x12\x11\n\tfloat_ops\x18\x08 \x01(\x03\x12#\n\x05trace\x18\n \x01(\x0b\x32\x14.tensorboard.CodeDef\x12\x32\n\x05\x61ttrs\x18\x0b \x03(\x0b\x32#.tensorboard.ProfileNode.AttrsEntry\x12\x32\n\x05\x65xecs\x18\x0c \x03(\x0b\x32#.tensorboard.ProfileNode.ExecsEntry\x1a-\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 
\x01(\x03:\x02\x38\x01\x1a\x46\n\x10InputShapesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.tensorboard.Tuple:\x02\x38\x01\x1a.\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1aG\n\x11OutputShapesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.tensorboard.Tuple:\x02\x38\x01\x1a\x35\n\x13SrcOutputIndexEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x44\n\nAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorboard.AttrValue:\x02\x38\x01\x1a\x46\n\nExecsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.tensorboard.ExecProfile:\x02\x38\x01\"\xe7\x03\n\x0b\x45xecProfile\x12\x11\n\trun_count\x18\x01 \x01(\x03\x12\x18\n\x10\x61ll_start_micros\x18\x02 \x01(\x03\x12\x19\n\x11latest_end_micros\x18\x03 \x01(\x03\x12I\n\x11\x61\x63\x63\x65lerator_execs\x18\x04 \x03(\x0b\x32..tensorboard.ExecProfile.AcceleratorExecsEntry\x12\x39\n\tcpu_execs\x18\x05 \x03(\x0b\x32&.tensorboard.ExecProfile.CpuExecsEntry\x12-\n\x0cmemory_execs\x18\x07 \x03(\x0b\x32\x17.tensorboard.ExecMemory\x12\x32\n\x0b\x61llocations\x18\x0b \x03(\x0b\x32\x1d.tensorboard.AllocationRecord\x12\x0f\n\x07\x64\x65vices\x18\x06 \x03(\t\x1aN\n\x15\x41\x63\x63\x65leratorExecsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorboard.ExecTime:\x02\x38\x01\x1a\x46\n\rCpuExecsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorboard.ExecTime:\x02\x38\x01\"-\n\x08\x45xecTime\x12!\n\x05times\x18\x01 \x03(\x0b\x32\x12.tensorboard.Tuple\"\xa8\x03\n\nExecMemory\x12\x15\n\rmemory_micros\x18\x01 \x01(\x03\x12\x17\n\x0fhost_temp_bytes\x18\x02 \x01(\x03\x12\x1d\n\x15host_persistent_bytes\x18\x03 \x01(\x03\x12\x1e\n\x16\x61\x63\x63\x65lerator_temp_bytes\x18\x04 \x01(\x03\x12$\n\x1c\x61\x63\x63\x65lerator_persistent_bytes\x18\x05 \x01(\x03\x12\x17\n\x0frequested_bytes\x18\x06 \x01(\x03\x12\x12\n\npeak_bytes\x18\x07 \x01(\x03\x12\x16\n\x0eresidual_bytes\x18\x08 \x01(\x03\x12\x14\n\x0coutput_bytes\x18\t \x01(\x03\x12\x1e\n\x16\x61llocator_bytes_in_use\x18\n \x01(\x03\x12@\n\routput_memory\x18\x0b \x03(\x0b\x32).tensorboard.ExecMemory.OutputMemoryEntry\x1aH\n\x11OutputMemoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.tensorboard.Memory:\x02\x38\x01\"\x1d\n\x05Tuple\x12\x14\n\x0cint64_values\x18\x01 \x03(\x03\"$\n\x06Memory\x12\r\n\x05\x62ytes\x18\x01 \x01(\x03\x12\x0b\n\x03ptr\x18\x02 \x01(\x04\x42RZPgithub.com/tensorflow/tensorflow/tensorflow/go/core/profiler/protos_all_go_protob\x06proto3') + + + +_CODEDEF = DESCRIPTOR.message_types_by_name['CodeDef'] +_CODEDEF_TRACE = _CODEDEF.nested_types_by_name['Trace'] +_OPLOGENTRY = DESCRIPTOR.message_types_by_name['OpLogEntry'] +_OPLOGPROTO = DESCRIPTOR.message_types_by_name['OpLogProto'] +_OPLOGPROTO_IDTOSTRINGENTRY = _OPLOGPROTO.nested_types_by_name['IdToStringEntry'] +_PROFILEPROTO = DESCRIPTOR.message_types_by_name['ProfileProto'] +_PROFILEPROTO_NODESENTRY = _PROFILEPROTO.nested_types_by_name['NodesEntry'] +_PROFILEPROTO_IDTOSTRINGENTRY = _PROFILEPROTO.nested_types_by_name['IdToStringEntry'] +_PROFILENODE = DESCRIPTOR.message_types_by_name['ProfileNode'] +_PROFILENODE_INPUTSENTRY = _PROFILENODE.nested_types_by_name['InputsEntry'] +_PROFILENODE_INPUTSHAPESENTRY = _PROFILENODE.nested_types_by_name['InputShapesEntry'] +_PROFILENODE_OUTPUTSENTRY = 
_PROFILENODE.nested_types_by_name['OutputsEntry'] +_PROFILENODE_OUTPUTSHAPESENTRY = _PROFILENODE.nested_types_by_name['OutputShapesEntry'] +_PROFILENODE_SRCOUTPUTINDEXENTRY = _PROFILENODE.nested_types_by_name['SrcOutputIndexEntry'] +_PROFILENODE_ATTRSENTRY = _PROFILENODE.nested_types_by_name['AttrsEntry'] +_PROFILENODE_EXECSENTRY = _PROFILENODE.nested_types_by_name['ExecsEntry'] +_EXECPROFILE = DESCRIPTOR.message_types_by_name['ExecProfile'] +_EXECPROFILE_ACCELERATOREXECSENTRY = _EXECPROFILE.nested_types_by_name['AcceleratorExecsEntry'] +_EXECPROFILE_CPUEXECSENTRY = _EXECPROFILE.nested_types_by_name['CpuExecsEntry'] +_EXECTIME = DESCRIPTOR.message_types_by_name['ExecTime'] +_EXECMEMORY = DESCRIPTOR.message_types_by_name['ExecMemory'] +_EXECMEMORY_OUTPUTMEMORYENTRY = _EXECMEMORY.nested_types_by_name['OutputMemoryEntry'] +_TUPLE = DESCRIPTOR.message_types_by_name['Tuple'] +_MEMORY = DESCRIPTOR.message_types_by_name['Memory'] +CodeDef = _reflection.GeneratedProtocolMessageType('CodeDef', (_message.Message,), { + + 'Trace' : _reflection.GeneratedProtocolMessageType('Trace', (_message.Message,), { + 'DESCRIPTOR' : _CODEDEF_TRACE, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CodeDef.Trace) + }) + , + 'DESCRIPTOR' : _CODEDEF, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.CodeDef) + }) +_sym_db.RegisterMessage(CodeDef) +_sym_db.RegisterMessage(CodeDef.Trace) + +OpLogEntry = _reflection.GeneratedProtocolMessageType('OpLogEntry', (_message.Message,), { + 'DESCRIPTOR' : _OPLOGENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpLogEntry) + }) +_sym_db.RegisterMessage(OpLogEntry) + +OpLogProto = _reflection.GeneratedProtocolMessageType('OpLogProto', (_message.Message,), { + + 'IdToStringEntry' : _reflection.GeneratedProtocolMessageType('IdToStringEntry', (_message.Message,), { + 'DESCRIPTOR' : _OPLOGPROTO_IDTOSTRINGENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpLogProto.IdToStringEntry) + }) + , + 'DESCRIPTOR' : _OPLOGPROTO, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.OpLogProto) + }) +_sym_db.RegisterMessage(OpLogProto) +_sym_db.RegisterMessage(OpLogProto.IdToStringEntry) + +ProfileProto = _reflection.GeneratedProtocolMessageType('ProfileProto', (_message.Message,), { + + 'NodesEntry' : _reflection.GeneratedProtocolMessageType('NodesEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILEPROTO_NODESENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileProto.NodesEntry) + }) + , + + 'IdToStringEntry' : _reflection.GeneratedProtocolMessageType('IdToStringEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILEPROTO_IDTOSTRINGENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileProto.IdToStringEntry) + }) + , + 'DESCRIPTOR' : _PROFILEPROTO, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileProto) + }) +_sym_db.RegisterMessage(ProfileProto) +_sym_db.RegisterMessage(ProfileProto.NodesEntry) +_sym_db.RegisterMessage(ProfileProto.IdToStringEntry) + +ProfileNode = _reflection.GeneratedProtocolMessageType('ProfileNode', (_message.Message,), { + + 'InputsEntry' : 
_reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_INPUTSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.InputsEntry) + }) + , + + 'InputShapesEntry' : _reflection.GeneratedProtocolMessageType('InputShapesEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_INPUTSHAPESENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.InputShapesEntry) + }) + , + + 'OutputsEntry' : _reflection.GeneratedProtocolMessageType('OutputsEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_OUTPUTSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.OutputsEntry) + }) + , + + 'OutputShapesEntry' : _reflection.GeneratedProtocolMessageType('OutputShapesEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_OUTPUTSHAPESENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.OutputShapesEntry) + }) + , + + 'SrcOutputIndexEntry' : _reflection.GeneratedProtocolMessageType('SrcOutputIndexEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_SRCOUTPUTINDEXENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.SrcOutputIndexEntry) + }) + , + + 'AttrsEntry' : _reflection.GeneratedProtocolMessageType('AttrsEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_ATTRSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.AttrsEntry) + }) + , + + 'ExecsEntry' : _reflection.GeneratedProtocolMessageType('ExecsEntry', (_message.Message,), { + 'DESCRIPTOR' : _PROFILENODE_EXECSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode.ExecsEntry) + }) + , + 'DESCRIPTOR' : _PROFILENODE, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ProfileNode) + }) +_sym_db.RegisterMessage(ProfileNode) +_sym_db.RegisterMessage(ProfileNode.InputsEntry) +_sym_db.RegisterMessage(ProfileNode.InputShapesEntry) +_sym_db.RegisterMessage(ProfileNode.OutputsEntry) +_sym_db.RegisterMessage(ProfileNode.OutputShapesEntry) +_sym_db.RegisterMessage(ProfileNode.SrcOutputIndexEntry) +_sym_db.RegisterMessage(ProfileNode.AttrsEntry) +_sym_db.RegisterMessage(ProfileNode.ExecsEntry) + +ExecProfile = _reflection.GeneratedProtocolMessageType('ExecProfile', (_message.Message,), { + + 'AcceleratorExecsEntry' : _reflection.GeneratedProtocolMessageType('AcceleratorExecsEntry', (_message.Message,), { + 'DESCRIPTOR' : _EXECPROFILE_ACCELERATOREXECSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecProfile.AcceleratorExecsEntry) + }) + , + + 'CpuExecsEntry' : _reflection.GeneratedProtocolMessageType('CpuExecsEntry', (_message.Message,), { + 'DESCRIPTOR' : _EXECPROFILE_CPUEXECSENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecProfile.CpuExecsEntry) + }) + , + 'DESCRIPTOR' : _EXECPROFILE, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecProfile) + }) +_sym_db.RegisterMessage(ExecProfile) 
+_sym_db.RegisterMessage(ExecProfile.AcceleratorExecsEntry) +_sym_db.RegisterMessage(ExecProfile.CpuExecsEntry) + +ExecTime = _reflection.GeneratedProtocolMessageType('ExecTime', (_message.Message,), { + 'DESCRIPTOR' : _EXECTIME, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecTime) + }) +_sym_db.RegisterMessage(ExecTime) + +ExecMemory = _reflection.GeneratedProtocolMessageType('ExecMemory', (_message.Message,), { + + 'OutputMemoryEntry' : _reflection.GeneratedProtocolMessageType('OutputMemoryEntry', (_message.Message,), { + 'DESCRIPTOR' : _EXECMEMORY_OUTPUTMEMORYENTRY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecMemory.OutputMemoryEntry) + }) + , + 'DESCRIPTOR' : _EXECMEMORY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.ExecMemory) + }) +_sym_db.RegisterMessage(ExecMemory) +_sym_db.RegisterMessage(ExecMemory.OutputMemoryEntry) + +Tuple = _reflection.GeneratedProtocolMessageType('Tuple', (_message.Message,), { + 'DESCRIPTOR' : _TUPLE, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.Tuple) + }) +_sym_db.RegisterMessage(Tuple) + +Memory = _reflection.GeneratedProtocolMessageType('Memory', (_message.Message,), { + 'DESCRIPTOR' : _MEMORY, + '__module__' : 'tensorboard.compat.proto.tfprof_log_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.Memory) + }) +_sym_db.RegisterMessage(Memory) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'ZPgithub.com/tensorflow/tensorflow/tensorflow/go/core/profiler/protos_all_go_proto' + _CODEDEF_TRACE.fields_by_name['file']._options = None + _CODEDEF_TRACE.fields_by_name['file']._serialized_options = b'\030\001' + _CODEDEF_TRACE.fields_by_name['function']._options = None + _CODEDEF_TRACE.fields_by_name['function']._serialized_options = b'\030\001' + _CODEDEF_TRACE.fields_by_name['line']._options = None + _CODEDEF_TRACE.fields_by_name['line']._serialized_options = b'\030\001' + _OPLOGPROTO_IDTOSTRINGENTRY._options = None + _OPLOGPROTO_IDTOSTRINGENTRY._serialized_options = b'8\001' + _PROFILEPROTO_NODESENTRY._options = None + _PROFILEPROTO_NODESENTRY._serialized_options = b'8\001' + _PROFILEPROTO_IDTOSTRINGENTRY._options = None + _PROFILEPROTO_IDTOSTRINGENTRY._serialized_options = b'8\001' + _PROFILENODE_INPUTSENTRY._options = None + _PROFILENODE_INPUTSENTRY._serialized_options = b'8\001' + _PROFILENODE_INPUTSHAPESENTRY._options = None + _PROFILENODE_INPUTSHAPESENTRY._serialized_options = b'8\001' + _PROFILENODE_OUTPUTSENTRY._options = None + _PROFILENODE_OUTPUTSENTRY._serialized_options = b'8\001' + _PROFILENODE_OUTPUTSHAPESENTRY._options = None + _PROFILENODE_OUTPUTSHAPESENTRY._serialized_options = b'8\001' + _PROFILENODE_SRCOUTPUTINDEXENTRY._options = None + _PROFILENODE_SRCOUTPUTINDEXENTRY._serialized_options = b'8\001' + _PROFILENODE_ATTRSENTRY._options = None + _PROFILENODE_ATTRSENTRY._serialized_options = b'8\001' + _PROFILENODE_EXECSENTRY._options = None + _PROFILENODE_EXECSENTRY._serialized_options = b'8\001' + _EXECPROFILE_ACCELERATOREXECSENTRY._options = None + _EXECPROFILE_ACCELERATOREXECSENTRY._serialized_options = b'8\001' + _EXECPROFILE_CPUEXECSENTRY._options = None + _EXECPROFILE_CPUEXECSENTRY._serialized_options = b'8\001' + _EXECMEMORY_OUTPUTMEMORYENTRY._options = None + 
_EXECMEMORY_OUTPUTMEMORYENTRY._serialized_options = b'8\001' + _CODEDEF._serialized_start=145 + _CODEDEF._serialized_end=362 + _CODEDEF_TRACE._serialized_start=201 + _CODEDEF_TRACE._serialized_end=362 + _OPLOGENTRY._serialized_start=364 + _OPLOGENTRY._serialized_end=464 + _OPLOGPROTO._serialized_start=467 + _OPLOGPROTO._serialized_end=639 + _OPLOGPROTO_IDTOSTRINGENTRY._serialized_start=590 + _OPLOGPROTO_IDTOSTRINGENTRY._serialized_end=639 + _PROFILEPROTO._serialized_start=642 + _PROFILEPROTO._serialized_end=964 + _PROFILEPROTO_NODESENTRY._serialized_start=843 + _PROFILEPROTO_NODESENTRY._serialized_end=913 + _PROFILEPROTO_IDTOSTRINGENTRY._serialized_start=590 + _PROFILEPROTO_IDTOSTRINGENTRY._serialized_end=639 + _PROFILENODE._serialized_start=967 + _PROFILENODE._serialized_end=2009 + _PROFILENODE_INPUTSENTRY._serialized_start=1574 + _PROFILENODE_INPUTSENTRY._serialized_end=1619 + _PROFILENODE_INPUTSHAPESENTRY._serialized_start=1621 + _PROFILENODE_INPUTSHAPESENTRY._serialized_end=1691 + _PROFILENODE_OUTPUTSENTRY._serialized_start=1693 + _PROFILENODE_OUTPUTSENTRY._serialized_end=1739 + _PROFILENODE_OUTPUTSHAPESENTRY._serialized_start=1741 + _PROFILENODE_OUTPUTSHAPESENTRY._serialized_end=1812 + _PROFILENODE_SRCOUTPUTINDEXENTRY._serialized_start=1814 + _PROFILENODE_SRCOUTPUTINDEXENTRY._serialized_end=1867 + _PROFILENODE_ATTRSENTRY._serialized_start=1869 + _PROFILENODE_ATTRSENTRY._serialized_end=1937 + _PROFILENODE_EXECSENTRY._serialized_start=1939 + _PROFILENODE_EXECSENTRY._serialized_end=2009 + _EXECPROFILE._serialized_start=2012 + _EXECPROFILE._serialized_end=2499 + _EXECPROFILE_ACCELERATOREXECSENTRY._serialized_start=2349 + _EXECPROFILE_ACCELERATOREXECSENTRY._serialized_end=2427 + _EXECPROFILE_CPUEXECSENTRY._serialized_start=2429 + _EXECPROFILE_CPUEXECSENTRY._serialized_end=2499 + _EXECTIME._serialized_start=2501 + _EXECTIME._serialized_end=2546 + _EXECMEMORY._serialized_start=2549 + _EXECMEMORY._serialized_end=2973 + _EXECMEMORY_OUTPUTMEMORYENTRY._serialized_start=2901 + _EXECMEMORY_OUTPUTMEMORYENTRY._serialized_end=2973 + _TUPLE._serialized_start=2975 + _TUPLE._serialized_end=3004 + _MEMORY._serialized_start=3006 + _MEMORY._serialized_end=3042 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/trackable_object_graph_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/trackable_object_graph_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..c045ff96f0f517d678690d0854fc346656cf540d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/trackable_object_graph_pb2.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/trackable_object_graph.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5tensorboard/compat/proto/trackable_object_graph.proto\x12\x0btensorboard\x1a\x1egoogle/protobuf/wrappers.proto\"\xf8\x05\n\x14TrackableObjectGraph\x12@\n\x05nodes\x18\x01 \x03(\x0b\x32\x31.tensorboard.TrackableObjectGraph.TrackableObject\x1a\x9d\x05\n\x0fTrackableObject\x12S\n\x08\x63hildren\x18\x01 \x03(\x0b\x32\x41.tensorboard.TrackableObjectGraph.TrackableObject.ObjectReference\x12V\n\nattributes\x18\x02 \x03(\x0b\x32\x42.tensorboard.TrackableObjectGraph.TrackableObject.SerializedTensor\x12_\n\x0eslot_variables\x18\x03 \x03(\x0b\x32G.tensorboard.TrackableObjectGraph.TrackableObject.SlotVariableReference\x12\x36\n\x10registered_saver\x18\x04 \x01(\x0b\x32\x1c.tensorboard.RegisteredSaver\x12\x39\n\x15has_checkpoint_values\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x1a\x36\n\x0fObjectReference\x12\x0f\n\x07node_id\x18\x01 \x01(\x05\x12\x12\n\nlocal_name\x18\x02 \x01(\t\x1a\x63\n\x10SerializedTensor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tfull_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63heckpoint_key\x18\x03 \x01(\tJ\x04\x08\x04\x10\x05R\x10optional_restore\x1al\n\x15SlotVariableReference\x12!\n\x19original_variable_node_id\x18\x01 \x01(\x05\x12\x11\n\tslot_name\x18\x02 \x01(\t\x12\x1d\n\x15slot_variable_node_id\x18\x03 \x01(\x05\"4\n\x0fRegisteredSaver\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bobject_name\x18\x02 \x01(\tBZZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_TRACKABLEOBJECTGRAPH = DESCRIPTOR.message_types_by_name['TrackableObjectGraph'] +_TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT = _TRACKABLEOBJECTGRAPH.nested_types_by_name['TrackableObject'] +_TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_OBJECTREFERENCE = _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT.nested_types_by_name['ObjectReference'] +_TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SERIALIZEDTENSOR = _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT.nested_types_by_name['SerializedTensor'] +_TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SLOTVARIABLEREFERENCE = _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT.nested_types_by_name['SlotVariableReference'] +_REGISTEREDSAVER = DESCRIPTOR.message_types_by_name['RegisteredSaver'] +TrackableObjectGraph = _reflection.GeneratedProtocolMessageType('TrackableObjectGraph', (_message.Message,), { + + 'TrackableObject' : _reflection.GeneratedProtocolMessageType('TrackableObject', (_message.Message,), { + + 'ObjectReference' : _reflection.GeneratedProtocolMessageType('ObjectReference', (_message.Message,), { + 'DESCRIPTOR' : _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_OBJECTREFERENCE, + '__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TrackableObjectGraph.TrackableObject.ObjectReference) + }) + , + + 'SerializedTensor' : _reflection.GeneratedProtocolMessageType('SerializedTensor', (_message.Message,), { + 'DESCRIPTOR' : _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SERIALIZEDTENSOR, + 
'__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TrackableObjectGraph.TrackableObject.SerializedTensor) + }) + , + + 'SlotVariableReference' : _reflection.GeneratedProtocolMessageType('SlotVariableReference', (_message.Message,), { + 'DESCRIPTOR' : _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SLOTVARIABLEREFERENCE, + '__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TrackableObjectGraph.TrackableObject.SlotVariableReference) + }) + , + 'DESCRIPTOR' : _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT, + '__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TrackableObjectGraph.TrackableObject) + }) + , + 'DESCRIPTOR' : _TRACKABLEOBJECTGRAPH, + '__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.TrackableObjectGraph) + }) +_sym_db.RegisterMessage(TrackableObjectGraph) +_sym_db.RegisterMessage(TrackableObjectGraph.TrackableObject) +_sym_db.RegisterMessage(TrackableObjectGraph.TrackableObject.ObjectReference) +_sym_db.RegisterMessage(TrackableObjectGraph.TrackableObject.SerializedTensor) +_sym_db.RegisterMessage(TrackableObjectGraph.TrackableObject.SlotVariableReference) + +RegisteredSaver = _reflection.GeneratedProtocolMessageType('RegisteredSaver', (_message.Message,), { + 'DESCRIPTOR' : _REGISTEREDSAVER, + '__module__' : 'tensorboard.compat.proto.trackable_object_graph_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.RegisteredSaver) + }) +_sym_db.RegisterMessage(RegisteredSaver) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001' + _TRACKABLEOBJECTGRAPH._serialized_start=103 + _TRACKABLEOBJECTGRAPH._serialized_end=863 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT._serialized_start=194 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT._serialized_end=863 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_OBJECTREFERENCE._serialized_start=598 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_OBJECTREFERENCE._serialized_end=652 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SERIALIZEDTENSOR._serialized_start=654 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SERIALIZEDTENSOR._serialized_end=753 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SLOTVARIABLEREFERENCE._serialized_start=755 + _TRACKABLEOBJECTGRAPH_TRACKABLEOBJECT_SLOTVARIABLEREFERENCE._serialized_end=863 + _REGISTEREDSAVER._serialized_start=865 + _REGISTEREDSAVER._serialized_end=917 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/versions_pb2.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/versions_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe028790839cd2bbb09b583e274a76ec5c7508b --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/versions_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorboard/compat/proto/versions.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'tensorboard/compat/proto/versions.proto\x12\x0btensorboard\"K\n\nVersionDef\x12\x10\n\x08producer\x18\x01 \x01(\x05\x12\x14\n\x0cmin_consumer\x18\x02 \x01(\x05\x12\x15\n\rbad_consumers\x18\x03 \x03(\x05\x42\x80\x01\n\x18org.tensorflow.frameworkB\x0eVersionsProtosP\x01ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/versions_go_proto\xf8\x01\x01\x62\x06proto3') + + + +_VERSIONDEF = DESCRIPTOR.message_types_by_name['VersionDef'] +VersionDef = _reflection.GeneratedProtocolMessageType('VersionDef', (_message.Message,), { + 'DESCRIPTOR' : _VERSIONDEF, + '__module__' : 'tensorboard.compat.proto.versions_pb2' + # @@protoc_insertion_point(class_scope:tensorboard.VersionDef) + }) +_sym_db.RegisterMessage(VersionDef) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030org.tensorflow.frameworkB\016VersionsProtosP\001ZOgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/versions_go_proto\370\001\001' + _VERSIONDEF._serialized_start=56 + _VERSIONDEF._serialized_end=131 +# @@protoc_insertion_point(module_scope) diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d00aa6b82952e3f94d5a279d2d0d9a864b2a71d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from tensorboard.compat.proto.config_pb2 import * # noqa +from tensorboard.compat.proto.event_pb2 import * # noqa +from tensorboard.compat.proto.graph_pb2 import * # noqa +from tensorboard.compat.proto.meta_graph_pb2 import * # noqa +from tensorboard.compat.proto.summary_pb2 import * # noqa +from .dtypes import as_dtype # noqa +from .dtypes import DType # noqa +from .dtypes import string # noqa +from . import app # noqa +from . import compat # noqa +from . import dtypes # noqa +from . import error_codes # noqa +from . import errors # noqa +from . import flags # noqa +from . import io # noqa +from . import pywrap_tensorflow # noqa +from . import tensor_shape # noqa + +compat.v1.errors = errors + +# Set a fake __version__ to help distinguish this as our own stub API. 
+__version__ = "stub" diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3609676778b2ee8729c17a5ed89e8485af6ae0d0 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__init__.py @@ -0,0 +1,132 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for Python 2 vs. 3 compatibility. + +## Conversion routines +In addition to the functions below, `as_str` converts an object to a `str`. + + +## Types +The compatibility module also provides the following types: + +* `bytes_or_text_types` +* `complex_types` +* `integral_types` +* `real_types` +""" + + +import numbers as _numbers +import numpy as _np + +from tensorboard.compat.tensorflow_stub.compat.v1 import * # noqa + + +def as_bytes(bytes_or_text, encoding="utf-8"): + """Converts either bytes or unicode to `bytes`, using utf-8 encoding for + text. + + Args: + bytes_or_text: A `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for encoding unicode. + + Returns: + A `bytes` object. + + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + if isinstance(bytes_or_text, str): + return bytes_or_text.encode(encoding) + elif isinstance(bytes_or_text, bytes): + return bytes_or_text + else: + raise TypeError( + "Expected binary or unicode string, got %r" % (bytes_or_text,) + ) + + +def as_text(bytes_or_text, encoding="utf-8"): + """Returns the given argument as a unicode string. + + Args: + bytes_or_text: A `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for decoding unicode. + + Returns: + A `unicode` (Python 2) or `str` (Python 3) object. + + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + if isinstance(bytes_or_text, str): + return bytes_or_text + elif isinstance(bytes_or_text, bytes): + return bytes_or_text.decode(encoding) + else: + raise TypeError( + "Expected binary or unicode string, got %r" % bytes_or_text + ) + + +# Convert an object to a `str` in both Python 2 and 3. +as_str = as_text + + +# @tf_export('compat.as_str_any') +def as_str_any(value): + """Converts to `str` as `str(value)`, but use `as_str` for `bytes`. + + Args: + value: A object that can be converted to `str`. + + Returns: + A `str` object. + """ + if isinstance(value, bytes): + return as_str(value) + else: + return str(value) + + +# @tf_export('compat.path_to_str') +def path_to_str(path): + """Returns the file system path representation of a `PathLike` object, else + as it is. + + Args: + path: An object that can be converted to path representation. + + Returns: + A `str` object. 
+ """ + if hasattr(path, "__fspath__"): + path = as_str_any(path.__fspath__()) + return path + + +# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we +# need to check them specifically. The same goes from Real and Complex. +integral_types = (_numbers.Integral, _np.integer) +# tf_export('compat.integral_types').export_constant(__name__, 'integral_types') +real_types = (_numbers.Real, _np.integer, _np.floating) +# tf_export('compat.real_types').export_constant(__name__, 'real_types') +complex_types = (_numbers.Complex, _np.number) +# tf_export('compat.complex_types').export_constant(__name__, 'complex_types') + +# Either bytes or text. +bytes_or_text_types = (bytes, str) +# tf_export('compat.bytes_or_text_types').export_constant(__name__, +# 'bytes_or_text_types') diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__pycache__/__init__.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ed4279c46d38f4441302147b65ff43ebdaff16f Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/v1/__pycache__/__init__.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/v1/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c76a74390eb5a5a4288a0cbf909df96d227ba91 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/compat/v1/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/io/__pycache__/__init__.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/io/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e7a93bc3ed579d33b1e6a041b4cc8d5b8fbe498 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/tensorflow_stub/io/__pycache__/__init__.cpython-310.pyc differ
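The generated *_pb2 modules added above are not meant to be read or edited by hand; they are consumed through the standard protobuf message API. As a minimal sketch (not part of the diff), the snippet below round-trips a VersionDef message whose field names (producer, min_consumer, bad_consumers) are taken from the serialized descriptor in versions_pb2.py; the numeric values, and the assumption that this tree's tensorboard package is importable, are illustrative only.

# Illustrative only: exercising the generated versions_pb2 module.
from tensorboard.compat.proto import versions_pb2

# Build a message using the fields declared in the VersionDef descriptor.
version = versions_pb2.VersionDef(producer=27, min_consumer=12)
version.bad_consumers.extend([5, 9])      # repeated int32 field

payload = version.SerializeToString()     # wire-format bytes
restored = versions_pb2.VersionDef()
restored.ParseFromString(payload)
assert restored.producer == 27
assert list(restored.bad_consumers) == [5, 9]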
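Likewise, the conversion helpers defined in tensorflow_stub/compat/__init__.py (as_bytes, as_text, as_str_any, path_to_str) normalize bytes, text, and path-like values as described in their docstrings. A small usage sketch, assuming a POSIX-style path and that the stub package imports cleanly:

# Illustrative only: behavior of the stub compat conversion helpers.
import pathlib
from tensorboard.compat.tensorflow_stub import compat

assert compat.as_bytes("loss") == b"loss"     # str -> utf-8 encoded bytes
assert compat.as_text(b"loss") == "loss"      # bytes -> decoded str
assert compat.as_str_any(42) == "42"          # non-bytes values fall back to str()
assert compat.path_to_str(pathlib.Path("/tmp/logs")) == "/tmp/logs"  # via __fspath__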