Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +8 -0
- evalkit_tf440/lib/libbz2.so.1.0.8 +3 -0
- evalkit_tf440/lib/libncurses++.a +3 -0
- evalkit_tf440/lib/libsqlite3.so.0 +3 -0
- evalkit_tf440/lib/libsqlite3.so.0.8.6 +3 -0
- evalkit_tf440/lib/libstdc++.so +3 -0
- evalkit_tf440/lib/libstdc++.so.6.0.29 +3 -0
- evalkit_tf440/lib/libtinfo.so +3 -0
- evalkit_tf440/lib/libtinfow.so.6.4 +3 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/__init__.py +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/client_feature_flags.py +113 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/empty_path_redirect.py +46 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/data_ingester.py +277 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_accumulator.py +951 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_inspector.py +465 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_loader.py +293 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_multiplexer.py +523 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_util.py +68 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_asset_util.py +105 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_event_accumulator.py +722 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/json_util.py +72 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/path_prefix.py +68 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__init__.py +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/allocation_description_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/api_def_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/attr_value_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cluster_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/config_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/coordination_config_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cost_graph_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cpp_shape_inference_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/debug_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/event_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/full_type_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/function_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_debug_info_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/histogram_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/meta_graph_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/node_def_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/op_def_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/resource_handle_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rewriter_config_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rpc_options_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saved_object_graph_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saver_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/step_stats_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/struct_pb2.cpython-310.pyc +0 -0
- infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/summary_pb2.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -756,3 +756,11 @@ deepseekvl2/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.s
 deepseekvl2/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 evalkit_tf440/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
 infer_4_33_0/lib/python3.10/site-packages/h5py/defs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libtinfow.so.6.4 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libstdc++.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libtinfo.so filter=lfs diff=lfs merge=lfs -text
+evalkit_tf440/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text

evalkit_tf440/lib/libbz2.so.1.0.8
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301
size 229016

evalkit_tf440/lib/libncurses++.a
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93b48c40f5d7b07e1a8c4bd9419df55c28e250cca1166be4aafd2fc7caf18823
size 187604

evalkit_tf440/lib/libsqlite3.so.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
size 1543808

evalkit_tf440/lib/libsqlite3.so.0.8.6
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
size 1543808

evalkit_tf440/lib/libstdc++.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
size 17981480

evalkit_tf440/lib/libstdc++.so.6.0.29
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
size 17981480

evalkit_tf440/lib/libtinfo.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2024856ea882d3151c1de53fcc7c66af037565fb8b387e4db35fb80b61ca49b4
size 287080

evalkit_tf440/lib/libtinfow.so.6.4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2024856ea882d3151c1de53fcc7c66af037565fb8b387e4db35fb80b61ca49b4
size 287080

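Each library added above is stored as a Git LFS pointer rather than as the binary itself: a three-line stub recording the spec version, the SHA-256 of the actual blob, and its size in bytes. (Note that libsqlite3.so.0/libsqlite3.so.0.8.6, libstdc++.so/libstdc++.so.6.0.29, and libtinfo.so/libtinfow.so.6.4 share oids, so each pair resolves to identical content.) As a minimal sketch, not part of this commit, here is how such a pointer text can be produced for a local file using only the Python standard library:

import hashlib
import os

def lfs_pointer(path):
    # Hash in 1 MiB chunks so large shared libraries are never read into memory whole.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (
        "version https://git-lfs.github.com/spec/v1\n"
        f"oid sha256:{digest.hexdigest()}\n"
        f"size {os.path.getsize(path)}\n"
    )

# Example: lfs_pointer("evalkit_tf440/lib/libbz2.so.1.0.8") should reproduce
# the pointer shown above for that file.
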
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/__init__.py
ADDED
File without changes

infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/client_feature_flags.py
ADDED
@@ -0,0 +1,113 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Middleware for injecting client-side feature flags into the Context."""

import json
import urllib.parse

from tensorboard import context
from tensorboard import errors


class ClientFeatureFlagsMiddleware:
    """Middleware for injecting client-side feature flags into the Context.

    The client webapp is expected to include a json-serialized version of its
    FeatureFlags in the `X-TensorBoard-Feature-Flags` header or the
    `tensorBoardFeatureFlags` query parameter. This middleware extracts the
    header or query parameter value and converts it into the client_feature_flags
    property for the DataProvider's Context object, where client_feature_flags
    is a Dict of string keys and arbitrary value types.

    In the event that both the header and query parameter are specified, the
    values from the header will take precedence.
    """

    def __init__(self, application):
        """Initializes this middleware.

        Args:
          application: The WSGI application to wrap (see PEP 3333).
        """
        self._application = application

    def __call__(self, environ, start_response):
        header_feature_flags = self._parse_potential_header_param_flags(
            environ.get("HTTP_X_TENSORBOARD_FEATURE_FLAGS")
        )
        query_string_feature_flags = self._parse_potential_query_param_flags(
            environ.get("QUERY_STRING")
        )

        if not header_feature_flags and not query_string_feature_flags:
            return self._application(environ, start_response)

        # header flags take precedence
        for flag, value in header_feature_flags.items():
            query_string_feature_flags[flag] = value

        ctx = context.from_environ(environ).replace(
            client_feature_flags=query_string_feature_flags
        )
        context.set_in_environ(environ, ctx)

        return self._application(environ, start_response)

    def _parse_potential_header_param_flags(self, header_string):
        if not header_string:
            return {}

        try:
            header_feature_flags = json.loads(header_string)
        except json.JSONDecodeError:
            raise errors.InvalidArgumentError(
                "X-TensorBoard-Feature-Flags cannot be JSON decoded."
            )

        if not isinstance(header_feature_flags, dict):
            raise errors.InvalidArgumentError(
                "X-TensorBoard-Feature-Flags cannot be decoded to a dict."
            )

        return header_feature_flags

    def _parse_potential_query_param_flags(self, query_string):
        if not query_string:
            return {}

        try:
            query_string_json = urllib.parse.parse_qs(query_string)
        except ValueError:
            return {}

        # parse_qs returns the dictionary values as lists for each name.
        potential_feature_flags = query_string_json.get(
            "tensorBoardFeatureFlags", []
        )
        if not potential_feature_flags:
            return {}
        try:
            client_feature_flags = json.loads(potential_feature_flags[0])
        except json.JSONDecodeError:
            raise errors.InvalidArgumentError(
                "tensorBoardFeatureFlags cannot be JSON decoded."
            )

        if not isinstance(client_feature_flags, dict):
            raise errors.InvalidArgumentError(
                "tensorBoardFeatureFlags cannot be decoded to a dict."
            )

        return client_feature_flags

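To illustrate the precedence rule implemented in `__call__` above (header flags overwrite query-parameter flags on merge), here is a self-contained sketch using only the standard library; the flag names are hypothetical:

import json
import urllib.parse

header_value = json.dumps({"inColab": True})  # hypothetical flag names
query_string = "tensorBoardFeatureFlags=" + urllib.parse.quote(
    json.dumps({"inColab": False, "scalarsBatchSize": 64})
)

header_flags = json.loads(header_value)
query_flags = json.loads(
    urllib.parse.parse_qs(query_string)["tensorBoardFeatureFlags"][0]
)

merged = dict(query_flags)
merged.update(header_flags)  # header flags win, as in __call__ above
print(merged)  # {'inColab': True, 'scalarsBatchSize': 64}
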
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/empty_path_redirect.py
ADDED
@@ -0,0 +1,46 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Redirect from an empty path to the virtual application root.

Sometimes, middleware transformations will make the path empty: for
example, navigating to "/foo" (no trailing slash) when the path prefix
is exactly "/foo". In such cases, relative links on the frontend would
break. Instead of handling this special case in each relevant
middleware, we install a top-level redirect handler from "" to "/".

This middleware respects `SCRIPT_NAME` as described by the WSGI spec. If
`SCRIPT_NAME` is set to "/foo", then an empty `PATH_INFO` corresponds to
the actual path "/foo", and so will be redirected to "/foo/".
"""


class EmptyPathRedirectMiddleware:
    """WSGI middleware to redirect from "" to "/"."""

    def __init__(self, application):
        """Initializes this middleware.

        Args:
          application: The WSGI application to wrap (see PEP 3333).
        """
        self._application = application

    def __call__(self, environ, start_response):
        path = environ.get("PATH_INFO", "")
        if path:
            return self._application(environ, start_response)
        location = environ.get("SCRIPT_NAME", "") + "/"
        start_response("301 Moved Permanently", [("Location", location)])
        return []

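A short sketch of the middleware above in action, assuming `EmptyPathRedirectMiddleware` as defined in this file; the stub application and recording `start_response` are purely illustrative:

def _inner_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok"]

app = EmptyPathRedirectMiddleware(_inner_app)

calls = []
body = app(
    {"PATH_INFO": "", "SCRIPT_NAME": "/foo"},
    lambda status, headers: calls.append((status, headers)),
)
print(calls)  # [('301 Moved Permanently', [('Location', '/foo/')])]
print(body)   # [] -- the redirect carries no body
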
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/data_ingester.py
ADDED
@@ -0,0 +1,277 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data ingestion logic backed by local event processing."""

import os
import re
import threading
import time


from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.backend.event_processing import tag_types
from tensorboard.compat import tf
from tensorboard.data import ingester
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging


DEFAULT_SIZE_GUIDANCE = {
    tag_types.TENSORS: 10,
}

# TODO(@wchargin): Replace with something that works for third-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
    scalar_metadata.PLUGIN_NAME: 1000,
    image_metadata.PLUGIN_NAME: 10,
    audio_metadata.PLUGIN_NAME: 10,
    histogram_metadata.PLUGIN_NAME: 500,
    pr_curve_metadata.PLUGIN_NAME: 100,
}

logger = tb_logging.get_logger()


class LocalDataIngester(ingester.DataIngester):
    """Data ingestion implementation to use when running locally."""

    def __init__(self, flags):
        """Initializes a `LocalDataIngester` from `flags`.

        Args:
          flags: An argparse.Namespace containing TensorBoard CLI flags.

        Returns:
          The new `LocalDataIngester`.
        """
        tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
        tensor_size_guidance.update(flags.samples_per_plugin)
        self._multiplexer = plugin_event_multiplexer.EventMultiplexer(
            size_guidance=DEFAULT_SIZE_GUIDANCE,
            tensor_size_guidance=tensor_size_guidance,
            purge_orphaned_data=flags.purge_orphaned_data,
            max_reload_threads=flags.max_reload_threads,
            event_file_active_filter=_get_event_file_active_filter(flags),
            detect_file_replacement=flags.detect_file_replacement,
        )
        self._data_provider = data_provider.MultiplexerDataProvider(
            self._multiplexer, flags.logdir or flags.logdir_spec
        )
        self._reload_interval = flags.reload_interval
        self._reload_task = flags.reload_task
        if flags.logdir:
            self._path_to_run = {os.path.expanduser(flags.logdir): None}
        else:
            self._path_to_run = _parse_event_files_spec(flags.logdir_spec)

        # Conditionally import tensorflow_io.
        if getattr(tf, "__version__", "stub") != "stub":
            _check_filesystem_support(self._path_to_run.keys())

    @property
    def data_provider(self):
        return self._data_provider

    @property
    def deprecated_multiplexer(self):
        return self._multiplexer

    def start(self):
        """Starts ingesting data based on the ingester flag configuration."""

        def _reload():
            while True:
                start = time.time()
                logger.info("TensorBoard reload process beginning")
                for path, name in self._path_to_run.items():
                    self._multiplexer.AddRunsFromDirectory(path, name)
                logger.info(
                    "TensorBoard reload process: Reload the whole Multiplexer"
                )
                self._multiplexer.Reload()
                duration = time.time() - start
                logger.info(
                    "TensorBoard done reloading. Load took %0.3f secs", duration
                )
                if self._reload_interval == 0:
                    # Only load the multiplexer once. Do not continuously reload.
                    break
                time.sleep(self._reload_interval)

        if self._reload_task == "process":
            logger.info("Launching reload in a child process")
            import multiprocessing

            process = multiprocessing.Process(target=_reload, name="Reloader")
            # Best-effort cleanup; on exit, the main TB parent process will attempt to
            # kill all its daemonic children.
            process.daemon = True
            process.start()
        elif self._reload_task in ("thread", "auto"):
            logger.info("Launching reload in a daemon thread")
            thread = threading.Thread(target=_reload, name="Reloader")
            # Make this a daemon thread, which won't block TB from exiting.
            thread.daemon = True
            thread.start()
        elif self._reload_task == "blocking":
            if self._reload_interval != 0:
                raise ValueError(
                    "blocking reload only allowed with load_interval=0"
                )
            _reload()
        else:
            raise ValueError("unrecognized reload_task: %s" % self._reload_task)


def _get_event_file_active_filter(flags):
    """Returns a predicate for whether an event file load timestamp is active.

    Returns:
      A predicate function accepting a single UNIX timestamp float argument, or
      None if multi-file loading is not enabled.
    """
    if not flags.reload_multifile:
        return None
    inactive_secs = flags.reload_multifile_inactive_secs
    if inactive_secs == 0:
        return None
    if inactive_secs < 0:
        return lambda timestamp: True
    return lambda timestamp: timestamp + inactive_secs >= time.time()


def _parse_event_files_spec(logdir_spec):
    """Parses `logdir_spec` into a map from paths to run group names.

    The `--logdir_spec` flag format is a comma-separated list of path
    specifications. A path spec looks like 'group_name:/path/to/directory' or
    '/path/to/directory'; in the latter case, the group is unnamed. Group names
    cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
    with no name and path '/foo:bar/baz'.

    Globs are not supported.

    Args:
      logdir_spec: A comma-separated list of run specifications.
    Returns:
      A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
      Groups without an explicit name are named after their path. If logdir_spec is
      None, returns an empty dict, which is helpful for testing things that don't
      require any valid runs.
    """
    files = {}
    if logdir_spec is None:
        return files
    # Make sure keeping consistent with ParseURI in core/lib/io/path.cc
    uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]*://.*")
    for specification in logdir_spec.split(","):
        # Check if the spec contains a group. A spec starting with xyz:// is
        # regarded as a URI path spec instead of a group spec. If the spec looks
        # like /foo:bar/baz, then we assume it's a path with a colon. If the spec
        # looks like [a-zA-Z]:\foo then we assume it's a Windows path and not a
        # single-letter group.
        if (
            uri_pattern.match(specification) is None
            and ":" in specification
            and specification[0] != "/"
            and not os.path.splitdrive(specification)[0]
        ):
            # We split at most once so run_name:/path:with/a/colon will work.
            run_name, _, path = specification.partition(":")
        else:
            run_name = None
            path = specification
        if uri_pattern.match(path) is None:
            path = os.path.realpath(os.path.expanduser(path))
        files[path] = run_name
    return files


def _get_filesystem_scheme(path):
    """Extracts filesystem scheme from a given path.

    The filesystem scheme is usually separated by `://` from the local filesystem
    path if given. For example, the scheme of `file://tmp/tf` is `file`.

    Args:
      path: A string representing an input log directory.
    Returns:
      Filesystem scheme, None if the path doesn't contain one.
    """
    if "://" not in path:
        return None
    return path.split("://")[0]


def _check_filesystem_support(paths):
    """Examines the list of filesystems user requested.

    If TF I/O schemes are requested, try to import tensorflow_io module.

    Args:
      paths: A list of strings representing input log directories.
    """
    get_registered_schemes = getattr(
        tf.io.gfile, "get_registered_schemes", None
    )
    registered_schemes = (
        None if get_registered_schemes is None else get_registered_schemes()
    )

    # Only need to check one path for each scheme.
    scheme_to_path = {_get_filesystem_scheme(path): path for path in paths}
    missing_scheme = None
    for scheme, path in scheme_to_path.items():
        if scheme is None:
            continue
        # Use `tf.io.gfile.exists.get_registered_schemes` if possible.
        if registered_schemes is not None:
            if scheme not in registered_schemes:
                missing_scheme = scheme
                break
        else:
            # Fall back to `tf.io.gfile.exists`.
            try:
                tf.io.gfile.exists(path)
            except tf.errors.UnimplementedError:
                missing_scheme = scheme
                break
            except tf.errors.OpError:
                # Swallow other errors; we aren't concerned about them at this point.
                pass

    if missing_scheme:
        try:
            import tensorflow_io  # noqa: F401
        except ImportError as e:
            supported_schemes_msg = (
                " (supported schemes: {})".format(registered_schemes)
                if registered_schemes
                else ""
            )
            raise tf.errors.UnimplementedError(
                None,
                None,
                (
                    "Error: Unsupported filename scheme '{}'{}. For additional"
                    + " filesystem support, consider installing TensorFlow I/O"
                    + " (https://www.tensorflow.org/io) via `pip install tensorflow-io`."
                ).format(missing_scheme, supported_schemes_msg),
            ) from e

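As a usage note, the `--logdir_spec` grammar that `_parse_event_files_spec` implements can be exercised directly; the paths below are illustrative (local paths are additionally resolved through os.path.realpath):

spec = "exp1:/tmp/runs/a,/tmp/runs/b,gs://bucket/logs"
# Per the docstring above, this yields (modulo realpath resolution):
#   '/tmp/runs/a'      -> 'exp1'  (named group)
#   '/tmp/runs/b'      -> None    (unnamed group)
#   'gs://bucket/logs' -> None    (URI spec; its colon is not a group separator)
print(_parse_event_files_spec(spec))
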
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_accumulator.py
ADDED
|
@@ -0,0 +1,951 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Takes a generator of values, and accumulates them for a frontend."""
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
import dataclasses
|
| 19 |
+
import threading
|
| 20 |
+
|
| 21 |
+
from typing import Optional, Sequence, Tuple
|
| 22 |
+
|
| 23 |
+
from tensorboard.backend.event_processing import directory_watcher
|
| 24 |
+
from tensorboard.backend.event_processing import event_file_loader
|
| 25 |
+
from tensorboard.backend.event_processing import event_util
|
| 26 |
+
from tensorboard.backend.event_processing import io_wrapper
|
| 27 |
+
from tensorboard.backend.event_processing import plugin_asset_util
|
| 28 |
+
from tensorboard.backend.event_processing import reservoir
|
| 29 |
+
from tensorboard.backend.event_processing import tag_types
|
| 30 |
+
from tensorboard.compat.proto import config_pb2
|
| 31 |
+
from tensorboard.compat.proto import event_pb2
|
| 32 |
+
from tensorboard.compat.proto import graph_pb2
|
| 33 |
+
from tensorboard.compat.proto import meta_graph_pb2
|
| 34 |
+
from tensorboard.compat.proto import tensor_pb2
|
| 35 |
+
from tensorboard.plugins.distribution import compressor
|
| 36 |
+
from tensorboard.util import tb_logging
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
logger = tb_logging.get_logger()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@dataclasses.dataclass(frozen=True)
|
| 43 |
+
class ScalarEvent:
|
| 44 |
+
"""Contains information of a scalar event.
|
| 45 |
+
|
| 46 |
+
Attributes:
|
| 47 |
+
wall_time: Timestamp of the event in seconds.
|
| 48 |
+
step: Global step of the event.
|
| 49 |
+
value: A float or int value of the scalar.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
wall_time: float
|
| 53 |
+
step: int
|
| 54 |
+
value: float
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@dataclasses.dataclass(frozen=True)
|
| 58 |
+
class CompressedHistogramEvent:
|
| 59 |
+
"""Contains information of a compressed histogram event.
|
| 60 |
+
|
| 61 |
+
Attributes:
|
| 62 |
+
wall_time: Timestamp of the event in seconds.
|
| 63 |
+
step: Global step of the event.
|
| 64 |
+
compressed_histogram_values: A sequence of tuples of basis points and
|
| 65 |
+
associated values in a compressed histogram.
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
wall_time: float
|
| 69 |
+
step: int
|
| 70 |
+
compressed_histogram_values: Sequence[Tuple[float, float]]
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@dataclasses.dataclass(frozen=True)
|
| 74 |
+
class HistogramValue:
|
| 75 |
+
"""Holds the information of the histogram values.
|
| 76 |
+
|
| 77 |
+
Attributes:
|
| 78 |
+
min: A float or int min value.
|
| 79 |
+
max: A float or int max value.
|
| 80 |
+
num: Total number of values.
|
| 81 |
+
sum: Sum of all values.
|
| 82 |
+
sum_squares: Sum of squares for all values.
|
| 83 |
+
bucket_limit: Upper values per bucket.
|
| 84 |
+
bucket: Numbers of values per bucket.
|
| 85 |
+
"""
|
| 86 |
+
|
| 87 |
+
min: float
|
| 88 |
+
max: float
|
| 89 |
+
num: int
|
| 90 |
+
sum: float
|
| 91 |
+
sum_squares: float
|
| 92 |
+
bucket_limit: Sequence[float]
|
| 93 |
+
bucket: Sequence[int]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@dataclasses.dataclass(frozen=True)
|
| 97 |
+
class HistogramEvent:
|
| 98 |
+
"""Contains information of a histogram event.
|
| 99 |
+
|
| 100 |
+
Attributes:
|
| 101 |
+
wall_time: Timestamp of the event in seconds.
|
| 102 |
+
step: Global step of the event.
|
| 103 |
+
histogram_value: Information of the histogram values.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
wall_time: float
|
| 107 |
+
step: int
|
| 108 |
+
histogram_value: HistogramValue
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@dataclasses.dataclass(frozen=True)
|
| 112 |
+
class ImageEvent:
|
| 113 |
+
"""Contains information of an image event.
|
| 114 |
+
|
| 115 |
+
Attributes:
|
| 116 |
+
wall_time: Timestamp of the event in seconds.
|
| 117 |
+
step: Global step of the event.
|
| 118 |
+
encoded_image_string: Image content encoded in bytes.
|
| 119 |
+
width: Width of the image.
|
| 120 |
+
height: Height of the image.
|
| 121 |
+
"""
|
| 122 |
+
|
| 123 |
+
wall_time: float
|
| 124 |
+
step: int
|
| 125 |
+
encoded_image_string: bytes
|
| 126 |
+
width: int
|
| 127 |
+
height: int
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@dataclasses.dataclass(frozen=True)
|
| 131 |
+
class AudioEvent:
|
| 132 |
+
"""Contains information of an audio event.
|
| 133 |
+
|
| 134 |
+
Attributes:
|
| 135 |
+
wall_time: Timestamp of the event in seconds.
|
| 136 |
+
step: Global step of the event.
|
| 137 |
+
encoded_audio_string: Audio content encoded in bytes.
|
| 138 |
+
content_type: A string describes the type of the audio content.
|
| 139 |
+
sample_rate: Sample rate of the audio in Hz. Must be positive.
|
| 140 |
+
length_frames: Length of the audio in frames (samples per channel).
|
| 141 |
+
"""
|
| 142 |
+
|
| 143 |
+
wall_time: float
|
| 144 |
+
step: int
|
| 145 |
+
encoded_audio_string: bytes
|
| 146 |
+
content_type: str
|
| 147 |
+
sample_rate: float
|
| 148 |
+
length_frames: int
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@dataclasses.dataclass(frozen=True)
|
| 152 |
+
class TensorEvent:
|
| 153 |
+
"""A tensor event.
|
| 154 |
+
|
| 155 |
+
Attributes:
|
| 156 |
+
wall_time: Timestamp of the event in seconds.
|
| 157 |
+
step: Global step of the event.
|
| 158 |
+
tensor_proto: A `TensorProto`.
|
| 159 |
+
"""
|
| 160 |
+
|
| 161 |
+
wall_time: float
|
| 162 |
+
step: int
|
| 163 |
+
tensor_proto: tensor_pb2.TensorProto
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
## Different types of summary events handled by the event_accumulator
|
| 167 |
+
SUMMARY_TYPES = {
|
| 168 |
+
"simple_value": "_ProcessScalar",
|
| 169 |
+
"histo": "_ProcessHistogram",
|
| 170 |
+
"image": "_ProcessImage",
|
| 171 |
+
"audio": "_ProcessAudio",
|
| 172 |
+
"tensor": "_ProcessTensor",
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
# Legacy aliases
|
| 176 |
+
COMPRESSED_HISTOGRAMS = tag_types.COMPRESSED_HISTOGRAMS
|
| 177 |
+
HISTOGRAMS = tag_types.HISTOGRAMS
|
| 178 |
+
IMAGES = tag_types.IMAGES
|
| 179 |
+
AUDIO = tag_types.AUDIO
|
| 180 |
+
SCALARS = tag_types.SCALARS
|
| 181 |
+
TENSORS = tag_types.TENSORS
|
| 182 |
+
GRAPH = tag_types.GRAPH
|
| 183 |
+
META_GRAPH = tag_types.META_GRAPH
|
| 184 |
+
RUN_METADATA = tag_types.RUN_METADATA
|
| 185 |
+
|
| 186 |
+
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
|
| 187 |
+
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
|
| 188 |
+
## and then the long tail.
|
| 189 |
+
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
|
| 190 |
+
|
| 191 |
+
DEFAULT_SIZE_GUIDANCE = {
|
| 192 |
+
COMPRESSED_HISTOGRAMS: 500,
|
| 193 |
+
IMAGES: 4,
|
| 194 |
+
AUDIO: 4,
|
| 195 |
+
SCALARS: 10000,
|
| 196 |
+
HISTOGRAMS: 1,
|
| 197 |
+
TENSORS: 10,
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
STORE_EVERYTHING_SIZE_GUIDANCE = {
|
| 201 |
+
COMPRESSED_HISTOGRAMS: 0,
|
| 202 |
+
IMAGES: 0,
|
| 203 |
+
AUDIO: 0,
|
| 204 |
+
SCALARS: 0,
|
| 205 |
+
HISTOGRAMS: 0,
|
| 206 |
+
TENSORS: 0,
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class EventAccumulator:
|
| 211 |
+
"""An `EventAccumulator` takes an event generator, and accumulates the
|
| 212 |
+
values.
|
| 213 |
+
|
| 214 |
+
The `EventAccumulator` is intended to provide a convenient Python interface
|
| 215 |
+
for loading Event data written during a TensorFlow run. TensorFlow writes out
|
| 216 |
+
`Event` protobuf objects, which have a timestamp and step number, and often
|
| 217 |
+
contain a `Summary`. Summaries can have different kinds of data like an image,
|
| 218 |
+
a scalar value, or a histogram. The Summaries also have a tag, which we use to
|
| 219 |
+
organize logically related data. The `EventAccumulator` supports retrieving
|
| 220 |
+
the `Event` and `Summary` data by its tag.
|
| 221 |
+
|
| 222 |
+
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
|
| 223 |
+
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
|
| 224 |
+
data types. Then, various functional endpoints (eg
|
| 225 |
+
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
|
| 226 |
+
associated with that tag.
|
| 227 |
+
|
| 228 |
+
The `Reload()` method synchronously loads all of the data written so far.
|
| 229 |
+
|
| 230 |
+
Histograms, audio, and images are very large, so storing all of them is not
|
| 231 |
+
recommended.
|
| 232 |
+
|
| 233 |
+
Fields:
|
| 234 |
+
audios: A reservoir.Reservoir of audio summaries.
|
| 235 |
+
compressed_histograms: A reservoir.Reservoir of compressed
|
| 236 |
+
histogram summaries.
|
| 237 |
+
histograms: A reservoir.Reservoir of histogram summaries.
|
| 238 |
+
images: A reservoir.Reservoir of image summaries.
|
| 239 |
+
most_recent_step: Step of last Event proto added. This should only
|
| 240 |
+
be accessed from the thread that calls Reload. This is -1 if
|
| 241 |
+
nothing has been loaded yet.
|
| 242 |
+
most_recent_wall_time: Timestamp of last Event proto added. This is
|
| 243 |
+
a float containing seconds from the UNIX epoch, or -1 if
|
| 244 |
+
nothing has been loaded yet. This should only be accessed from
|
| 245 |
+
the thread that calls Reload.
|
| 246 |
+
path: A file path to a directory containing tf events files, or a single
|
| 247 |
+
tf events file. The accumulator will load events from this path.
|
| 248 |
+
scalars: A reservoir.Reservoir of scalar summaries.
|
| 249 |
+
tensors: A reservoir.Reservoir of tensor summaries.
|
| 250 |
+
|
| 251 |
+
@@Tensors
|
| 252 |
+
"""
|
| 253 |
+
|
| 254 |
+
def __init__(
|
| 255 |
+
self,
|
| 256 |
+
path,
|
| 257 |
+
size_guidance=None,
|
| 258 |
+
compression_bps=NORMAL_HISTOGRAM_BPS,
|
| 259 |
+
purge_orphaned_data=True,
|
| 260 |
+
):
|
| 261 |
+
"""Construct the `EventAccumulator`.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
path: A file path to a directory containing tf events files, or a single
|
| 265 |
+
tf events file. The accumulator will load events from this path.
|
| 266 |
+
size_guidance: Information on how much data the EventAccumulator should
|
| 267 |
+
store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
|
| 268 |
+
so as to avoid OOMing the client. The size_guidance should be a map
|
| 269 |
+
from a `tagType` string to an integer representing the number of
|
| 270 |
+
items to keep per tag for items of that `tagType`. If the size is 0,
|
| 271 |
+
all events are stored.
|
| 272 |
+
compression_bps: Information on how the `EventAccumulator` should compress
|
| 273 |
+
histogram data for the `CompressedHistograms` tag (for details see
|
| 274 |
+
`ProcessCompressedHistogram`).
|
| 275 |
+
purge_orphaned_data: Whether to discard any events that were "orphaned" by
|
| 276 |
+
a TensorFlow restart.
|
| 277 |
+
"""
|
| 278 |
+
size_guidance = size_guidance or DEFAULT_SIZE_GUIDANCE
|
| 279 |
+
sizes = {}
|
| 280 |
+
for key in DEFAULT_SIZE_GUIDANCE:
|
| 281 |
+
if key in size_guidance:
|
| 282 |
+
sizes[key] = size_guidance[key]
|
| 283 |
+
else:
|
| 284 |
+
sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
|
| 285 |
+
|
| 286 |
+
self._first_event_timestamp = None
|
| 287 |
+
self.scalars = reservoir.Reservoir(size=sizes[SCALARS])
|
| 288 |
+
|
| 289 |
+
self._graph = None
|
| 290 |
+
self._graph_from_metagraph = False
|
| 291 |
+
self._meta_graph = None
|
| 292 |
+
self._tagged_metadata = {}
|
| 293 |
+
self.summary_metadata = {}
|
| 294 |
+
self.histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
|
| 295 |
+
self.compressed_histograms = reservoir.Reservoir(
|
| 296 |
+
size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False
|
| 297 |
+
)
|
| 298 |
+
self.images = reservoir.Reservoir(size=sizes[IMAGES])
|
| 299 |
+
self.audios = reservoir.Reservoir(size=sizes[AUDIO])
|
| 300 |
+
self.tensors = reservoir.Reservoir(size=sizes[TENSORS])
|
| 301 |
+
|
| 302 |
+
# Keep a mapping from plugin name to a dict mapping from tag to plugin data
|
| 303 |
+
# content obtained from the SummaryMetadata (metadata field of Value) for
|
| 304 |
+
# that plugin (This is not the entire SummaryMetadata proto - only the
|
| 305 |
+
# content for that plugin). The SummaryWriter only keeps the content on the
|
| 306 |
+
# first event encountered per tag, so we must store that first instance of
|
| 307 |
+
# content for each tag.
|
| 308 |
+
self._plugin_to_tag_to_content = collections.defaultdict(dict)
|
| 309 |
+
|
| 310 |
+
self._generator_mutex = threading.Lock()
|
| 311 |
+
self.path = path
|
| 312 |
+
self._generator = _GeneratorFromPath(path)
|
| 313 |
+
|
| 314 |
+
self._compression_bps = compression_bps
|
| 315 |
+
self.purge_orphaned_data = purge_orphaned_data
|
| 316 |
+
|
| 317 |
+
self.most_recent_step = -1
|
| 318 |
+
self.most_recent_wall_time = -1
|
| 319 |
+
self.file_version = None
|
| 320 |
+
|
| 321 |
+
# Name of the source writer that writes the event.
|
| 322 |
+
self._source_writer = None
|
| 323 |
+
|
| 324 |
+
# The attributes that get built up by the accumulator
|
| 325 |
+
self.accumulated_attrs = (
|
| 326 |
+
"scalars",
|
| 327 |
+
"histograms",
|
| 328 |
+
"compressed_histograms",
|
| 329 |
+
"images",
|
| 330 |
+
"audios",
|
| 331 |
+
)
|
| 332 |
+
self._tensor_summaries = {}
|
| 333 |
+
|
| 334 |
+
def Reload(self):
|
| 335 |
+
"""Loads all events added since the last call to `Reload`.
|
| 336 |
+
|
| 337 |
+
If `Reload` was never called, loads all events in the file.
|
| 338 |
+
|
| 339 |
+
Returns:
|
| 340 |
+
The `EventAccumulator`.
|
| 341 |
+
"""
|
| 342 |
+
with self._generator_mutex:
|
| 343 |
+
for event in self._generator.Load():
|
| 344 |
+
self._ProcessEvent(event)
|
| 345 |
+
return self
|
| 346 |
+
|
| 347 |
+
def PluginAssets(self, plugin_name):
|
| 348 |
+
"""Return a list of all plugin assets for the given plugin.
|
| 349 |
+
|
| 350 |
+
Args:
|
| 351 |
+
plugin_name: The string name of a plugin to retrieve assets for.
|
| 352 |
+
|
| 353 |
+
Returns:
|
| 354 |
+
A list of string plugin asset names, or empty list if none are available.
|
| 355 |
+
If the plugin was not registered, an empty list is returned.
|
| 356 |
+
"""
|
| 357 |
+
return plugin_asset_util.ListAssets(self.path, plugin_name)
|
| 358 |
+
|
| 359 |
+
def RetrievePluginAsset(self, plugin_name, asset_name):
|
| 360 |
+
"""Return the contents of a given plugin asset.
|
| 361 |
+
|
| 362 |
+
Args:
|
| 363 |
+
plugin_name: The string name of a plugin.
|
| 364 |
+
asset_name: The string name of an asset.
|
| 365 |
+
|
| 366 |
+
Returns:
|
| 367 |
+
The string contents of the plugin asset.
|
| 368 |
+
|
| 369 |
+
Raises:
|
| 370 |
+
KeyError: If the asset is not available.
|
| 371 |
+
"""
|
| 372 |
+
return plugin_asset_util.RetrieveAsset(
|
| 373 |
+
self.path, plugin_name, asset_name
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
def FirstEventTimestamp(self):
|
| 377 |
+
"""Returns the timestamp in seconds of the first event.
|
| 378 |
+
|
| 379 |
+
If the first event has been loaded (either by this method or by `Reload`,
|
| 380 |
+
this returns immediately. Otherwise, it will load in the first event. Note
|
| 381 |
+
that this means that calling `Reload` will cause this to block until
|
| 382 |
+
`Reload` has finished.
|
| 383 |
+
|
| 384 |
+
Returns:
|
| 385 |
+
The timestamp in seconds of the first event that was loaded.
|
| 386 |
+
|
| 387 |
+
Raises:
|
| 388 |
+
ValueError: If no events have been loaded and there were no events found
|
| 389 |
+
on disk.
|
| 390 |
+
"""
|
| 391 |
+
if self._first_event_timestamp is not None:
|
| 392 |
+
return self._first_event_timestamp
|
| 393 |
+
with self._generator_mutex:
|
| 394 |
+
try:
|
| 395 |
+
event = next(self._generator.Load())
|
| 396 |
+
self._ProcessEvent(event)
|
| 397 |
+
return self._first_event_timestamp
|
| 398 |
+
|
| 399 |
+
except StopIteration:
|
| 400 |
+
raise ValueError("No event timestamp could be found")
|
| 401 |
+
|
| 402 |
+
def GetSourceWriter(self) -> Optional[str]:
|
| 403 |
+
"""Returns the name of the event writer."""
|
| 404 |
+
if self._source_writer is not None:
|
| 405 |
+
return self._source_writer
|
| 406 |
+
with self._generator_mutex:
|
| 407 |
+
try:
|
| 408 |
+
event = next(self._generator.Load())
|
| 409 |
+
self._ProcessEvent(event)
|
| 410 |
+
return self._source_writer
|
| 411 |
+
except StopIteration:
|
| 412 |
+
logger.info(
|
| 413 |
+
"End of file in %s, no source writer was found.", self.path
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
def PluginTagToContent(self, plugin_name):
|
| 417 |
+
"""Returns a dict mapping tags to content specific to that plugin.
|
| 418 |
+
|
| 419 |
+
Args:
|
| 420 |
+
plugin_name: The name of the plugin for which to fetch plugin-specific
|
| 421 |
+
content.
|
| 422 |
+
|
| 423 |
+
Raises:
|
| 424 |
+
KeyError: if the plugin name is not found.
|
| 425 |
+
|
| 426 |
+
Returns:
|
| 427 |
+
A dict mapping tag names to bytestrings of plugin-specific content-- by
|
| 428 |
+
convention, in the form of binary serialized protos.
|
| 429 |
+
"""
|
| 430 |
+
if plugin_name not in self._plugin_to_tag_to_content:
|
| 431 |
+
raise KeyError("Plugin %r could not be found." % plugin_name)
|
| 432 |
+
return self._plugin_to_tag_to_content[plugin_name]
|
| 433 |
+
|
| 434 |
+

    def SummaryMetadata(self, tag):
        """Given a summary tag name, return the associated metadata object.

        Args:
          tag: The name of a tag, as a string.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          A `SummaryMetadata` protobuf.
        """
        return self.summary_metadata[tag]

    def _ProcessEvent(self, event):
        """Called whenever an event is loaded."""
        if self._first_event_timestamp is None:
            self._first_event_timestamp = event.wall_time

        if event.HasField("source_metadata"):
            new_source_writer = event_util.GetSourceWriter(
                event.source_metadata
            )
            if self._source_writer and self._source_writer != new_source_writer:
                logger.info(
                    (
                        "Found new source writer for event.proto. "
                        "Old: {0}, New: {1}"
                    ).format(self._source_writer, new_source_writer)
                )
            self._source_writer = new_source_writer

        if event.HasField("file_version"):
            new_file_version = event_util.ParseFileVersion(event.file_version)
            if self.file_version and self.file_version != new_file_version:
                ## This should not happen.
                logger.warning(
                    (
                        "Found new file_version for event.proto. This will "
                        "affect purging logic for TensorFlow restarts. "
                        "Old: {0} New: {1}"
                    ).format(self.file_version, new_file_version)
                )
            self.file_version = new_file_version

        self._MaybePurgeOrphanedData(event)

        ## Process the event.
        # GraphDef and MetaGraphDef are handled in a special way:
        # If no graph_def Event is available, but a meta_graph_def is, and it
        # contains a graph_def, then use the meta_graph_def.graph_def as our
        # graph. If a graph_def Event is available, always prefer it to the
        # graph_def inside the meta_graph_def.
        if event.HasField("graph_def"):
            if self._graph is not None:
                logger.warning(
                    (
                        "Found more than one graph event per run, or there was "
                        "a metagraph containing a graph_def, as well as one or "
                        "more graph events. Overwriting the graph with the "
                        "newest event."
                    )
                )
            self._graph = event.graph_def
            self._graph_from_metagraph = False
        elif event.HasField("meta_graph_def"):
            if self._meta_graph is not None:
                logger.warning(
                    (
                        "Found more than one metagraph event per run. "
                        "Overwriting the metagraph with the newest event."
                    )
                )
            self._meta_graph = event.meta_graph_def
            if self._graph is None or self._graph_from_metagraph:
                # We may have a graph_def in the metagraph. If so, and no
                # graph_def is directly available, use this one instead.
                meta_graph = meta_graph_pb2.MetaGraphDef()
                meta_graph.ParseFromString(self._meta_graph)
                if meta_graph.graph_def:
                    if self._graph is not None:
                        logger.warning(
                            (
"Found multiple metagraphs containing graph_defs,"
|
| 518 |
+
"but did not find any graph events. Overwriting the "
|
| 519 |
+
"graph with the newest metagraph version."
|
| 520 |
+
)
|
| 521 |
+
)
|
| 522 |
+
self._graph_from_metagraph = True
|
| 523 |
+
self._graph = meta_graph.graph_def.SerializeToString()
|
| 524 |
+
elif event.HasField("tagged_run_metadata"):
|
| 525 |
+
tag = event.tagged_run_metadata.tag
|
| 526 |
+
if tag in self._tagged_metadata:
|
| 527 |
+
logger.warning(
|
| 528 |
+
'Found more than one "run metadata" event with tag '
|
| 529 |
+
+ tag
|
| 530 |
+
+ ". Overwriting it with the newest event."
|
| 531 |
+
)
|
| 532 |
+
self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
|
| 533 |
+
elif event.HasField("summary"):
|
| 534 |
+
for value in event.summary.value:
|
| 535 |
+
if value.HasField("metadata"):
|
| 536 |
+
tag = value.tag
|
| 537 |
+
# We only store the first instance of the metadata. This check
|
| 538 |
+
# is important: the `FileWriter` does strip metadata from all
|
| 539 |
+
# values except the first one per each tag, but a new
|
| 540 |
+
# `FileWriter` is created every time a training job stops and
|
| 541 |
+
# restarts. Hence, we must also ignore non-initial metadata in
|
| 542 |
+
# this logic.
|
| 543 |
+
if tag not in self.summary_metadata:
|
| 544 |
+
self.summary_metadata[tag] = value.metadata
|
| 545 |
+
plugin_data = value.metadata.plugin_data
|
| 546 |
+
if plugin_data.plugin_name:
|
| 547 |
+
self._plugin_to_tag_to_content[
|
| 548 |
+
plugin_data.plugin_name
|
| 549 |
+
][tag] = plugin_data.content
|
| 550 |
+
else:
|
| 551 |
+
logger.warning(
|
| 552 |
+
(
|
| 553 |
+
"This summary with tag %r is oddly not associated with a "
|
| 554 |
+
"plugin."
|
| 555 |
+
),
|
| 556 |
+
tag,
|
| 557 |
+
)
|
| 558 |
+
|
| 559 |
+
for summary_type, summary_func in SUMMARY_TYPES.items():
|
| 560 |
+
if value.HasField(summary_type):
|
| 561 |
+
datum = getattr(value, summary_type)
|
| 562 |
+
tag = value.tag
|
| 563 |
+
if summary_type == "tensor" and not tag:
|
| 564 |
+
# This tensor summary was created using the old method that used
|
| 565 |
+
# plugin assets. We must still continue to support it.
|
| 566 |
+
tag = value.node_name
|
| 567 |
+
getattr(self, summary_func)(
|
| 568 |
+
tag, event.wall_time, event.step, datum
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
def Tags(self):
|
| 572 |
+
"""Return all tags found in the value stream.
|
| 573 |
+
|
| 574 |
+
Returns:
|
| 575 |
+
A `{tagType: ['list', 'of', 'tags']}` dictionary.
|
| 576 |
+
"""
|
| 577 |
+
return {
|
| 578 |
+
IMAGES: self.images.Keys(),
|
| 579 |
+
AUDIO: self.audios.Keys(),
|
| 580 |
+
HISTOGRAMS: self.histograms.Keys(),
|
| 581 |
+
SCALARS: self.scalars.Keys(),
|
| 582 |
+
COMPRESSED_HISTOGRAMS: self.compressed_histograms.Keys(),
|
| 583 |
+
TENSORS: self.tensors.Keys(),
|
| 584 |
+
# Use a heuristic: if the metagraph is available, but
|
| 585 |
+
# graph is not, then we assume the metagraph contains the graph.
|
| 586 |
+
GRAPH: self._graph is not None,
|
| 587 |
+
META_GRAPH: self._meta_graph is not None,
|
| 588 |
+
RUN_METADATA: list(self._tagged_metadata.keys()),
|
| 589 |
+
}
|
| 590 |
+
|
| 591 |
+
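
    # Illustrative Tags() result (hypothetical tag names, not in the original
    # source): {"scalars": ["loss"], "images": [], "histograms": [],
    # "compressedHistograms": [], "tensors": [], "audio": [],
    # "graph": True, "meta_graph": False, "run_metadata": ["step_99"]}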

    def Scalars(self, tag):
        """Given a summary tag, return all associated `ScalarEvent`s.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `ScalarEvent`s.
        """
        return self.scalars.Items(tag)

    def Graph(self):
        """Return the graph definition, if there is one.

        If the graph is stored directly, return that. If no graph is stored
        directly but a metagraph is stored containing a graph, return that.

        Raises:
          ValueError: If there is no graph for this run.

        Returns:
          The `graph_def` proto.
        """
        graph = graph_pb2.GraphDef()
        if self._graph is not None:
            graph.ParseFromString(self._graph)
            return graph
        raise ValueError("There is no graph in this EventAccumulator")

    def MetaGraph(self):
        """Return the metagraph definition, if there is one.

        Raises:
          ValueError: If there is no metagraph for this run.

        Returns:
          The `meta_graph_def` proto.
        """
        if self._meta_graph is None:
            raise ValueError("There is no metagraph in this EventAccumulator")
        meta_graph = meta_graph_pb2.MetaGraphDef()
        meta_graph.ParseFromString(self._meta_graph)
        return meta_graph

    def RunMetadata(self, tag):
        """Given a tag, return the associated session.run() metadata.

        Args:
          tag: A string tag associated with the event.

        Raises:
          ValueError: If the tag is not found.

        Returns:
          The metadata in form of `RunMetadata` proto.
        """
        if tag not in self._tagged_metadata:
            raise ValueError("There is no run metadata with this tag name")

        run_metadata = config_pb2.RunMetadata()
        run_metadata.ParseFromString(self._tagged_metadata[tag])
        return run_metadata

    def Histograms(self, tag):
        """Given a summary tag, return all associated histograms.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `HistogramEvent`s.
        """
        return self.histograms.Items(tag)

    def CompressedHistograms(self, tag):
        """Given a summary tag, return all associated compressed histograms.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `CompressedHistogramEvent`s.
        """
        return self.compressed_histograms.Items(tag)

    def Images(self, tag):
        """Given a summary tag, return all associated images.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `ImageEvent`s.
        """
        return self.images.Items(tag)

    def Audio(self, tag):
        """Given a summary tag, return all associated audio.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `AudioEvent`s.
        """
        return self.audios.Items(tag)

    def Tensors(self, tag):
        """Given a summary tag, return all associated tensors.

        Args:
          tag: A string tag associated with the events.

        Raises:
          KeyError: If the tag is not found.

        Returns:
          An array of `TensorEvent`s.
        """
        return self.tensors.Items(tag)

    def _MaybePurgeOrphanedData(self, event):
        """Maybe purge orphaned data due to a TensorFlow crash.

        When TensorFlow crashes at step T+O and restarts at step T, any events
        written after step T are now "orphaned" and will be at best misleading
        if they are included in TensorBoard.

        This logic attempts to determine if there is orphaned data, and purge
        it if it is found.

        Args:
          event: The event to use as a reference, to determine if a purge is
            needed.
        """
        if not self.purge_orphaned_data:
            return
        ## Check if the event happened after a crash, and purge expired tags.
        if self.file_version and self.file_version >= 2:
            ## If the file_version is recent enough, use the SessionLog enum
            ## to check for restarts.
            self._CheckForRestartAndMaybePurge(event)
        else:
            ## If there is no file version, default to old logic of checking for
            ## out of order steps.
            self._CheckForOutOfOrderStepAndMaybePurge(event)

    def _CheckForRestartAndMaybePurge(self, event):
        """Check and discard expired events using SessionLog.START.

        Check for a SessionLog.START event and purge all previously seen
        events with larger steps, because they are out of date. Because of
        supervisor threading, it is possible that this logic will cause the
        first few event messages to be discarded since supervisor threading
        does not guarantee that the START message is deterministically written
        first.

        This method is preferred over _CheckForOutOfOrderStepAndMaybePurge,
        which can inadvertently discard events due to supervisor threading.

        Args:
          event: The event to use as reference. If the event is a START event,
            all previously seen events with a greater event.step will be
            purged.
        """
        if (
            event.HasField("session_log")
            and event.session_log.status == event_pb2.SessionLog.START
        ):
            self._Purge(event, by_tags=False)

    def _CheckForOutOfOrderStepAndMaybePurge(self, event):
        """Check for out-of-order event.step and discard expired events for
        tags.

        Check if the event is out of order relative to the global most recent
        step. If it is, purge outdated summaries for tags that the event
        contains.

        Args:
          event: The event to use as reference. If the event is out-of-order,
            all events with the same tags, but with a greater event.step will
            be purged.
        """
        if event.step < self.most_recent_step and event.HasField("summary"):
            self._Purge(event, by_tags=True)
        else:
            self.most_recent_step = event.step
            self.most_recent_wall_time = event.wall_time
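
    # Worked example of the purge heuristic above (not in the original
    # source): if events arrive at steps 100..110, TensorFlow crashes, and a
    # restarted job rewrites step 101, then event.step (101) is less than
    # most_recent_step (110), so previously seen events with step > 101 are
    # purged for the tags carried by the new event.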

    def _ConvertHistogramProtoToPopo(self, histo):
        """Converts histogram proto to Python object."""
        return HistogramValue(
            min=histo.min,
            max=histo.max,
            num=histo.num,
            sum=histo.sum,
            sum_squares=histo.sum_squares,
            bucket_limit=list(histo.bucket_limit),
            bucket=list(histo.bucket),
        )

    def _ProcessHistogram(self, tag, wall_time, step, histo):
        """Processes a proto histogram by adding it to accumulated state."""
        histo = self._ConvertHistogramProtoToPopo(histo)
        histo_ev = HistogramEvent(wall_time, step, histo)
        self.histograms.AddItem(tag, histo_ev)
        self.compressed_histograms.AddItem(
            tag, histo_ev, self._CompressHistogram
        )

    def _CompressHistogram(self, histo_ev):
        """Callback for _ProcessHistogram."""
        return CompressedHistogramEvent(
            histo_ev.wall_time,
            histo_ev.step,
            compressor.compress_histogram_proto(
                histo_ev.histogram_value, self._compression_bps
            ),
        )

    def _ProcessImage(self, tag, wall_time, step, image):
        """Processes an image by adding it to accumulated state."""
        event = ImageEvent(
            wall_time=wall_time,
            step=step,
            encoded_image_string=image.encoded_image_string,
            width=image.width,
            height=image.height,
        )
        self.images.AddItem(tag, event)

    def _ProcessAudio(self, tag, wall_time, step, audio):
        """Processes an audio summary by adding it to accumulated state."""
        event = AudioEvent(
            wall_time=wall_time,
            step=step,
            encoded_audio_string=audio.encoded_audio_string,
            content_type=audio.content_type,
            sample_rate=audio.sample_rate,
            length_frames=audio.length_frames,
        )
        self.audios.AddItem(tag, event)

    def _ProcessScalar(self, tag, wall_time, step, scalar):
        """Processes a simple value by adding it to accumulated state."""
        sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
        self.scalars.AddItem(tag, sv)

    def _ProcessTensor(self, tag, wall_time, step, tensor):
        tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor)
        self.tensors.AddItem(tag, tv)

    def _Purge(self, event, by_tags):
        """Purge all events that have occurred after the given event.step.

        If by_tags is True, purge all events that occurred after the given
        event.step, but only for the tags that the event has. Non-sequential
        event.steps suggest that a TensorFlow restart occurred, and we discard
        the out-of-order events to display a consistent view in TensorBoard.

        Discarding by tags is the safer method, when we are unsure whether a
        restart has occurred, given that threading in supervisor can cause
        events of different tags to arrive with unsynchronized step values.

        If by_tags is False, then purge all events with event.step greater
        than the given event.step. This can be used when we are certain that
        a TensorFlow restart has occurred and these events can be discarded.

        Args:
          event: The event to use as reference for the purge. All events with
            the same tags, but with a greater event.step will be purged.
          by_tags: Bool to dictate whether to discard all out-of-order events
            or only those that are associated with the given reference event.
        """
        ## Keep data in reservoirs that has a step less than event.step
        _NotExpired = lambda x: x.step < event.step

        if by_tags:

            def _ExpiredPerTag(value):
                return [
                    getattr(self, x).FilterItems(_NotExpired, value.tag)
                    for x in self.accumulated_attrs
                ]

            expired_per_tags = [
                _ExpiredPerTag(value) for value in event.summary.value
            ]
            expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
        else:
            expired_per_type = [
                getattr(self, x).FilterItems(_NotExpired)
                for x in self.accumulated_attrs
            ]

        if sum(expired_per_type) > 0:
            purge_msg = _GetPurgeMessage(
                self.most_recent_step,
                self.most_recent_wall_time,
                event.step,
                event.wall_time,
                *expired_per_type,
            )
            logger.warning(purge_msg)


def _GetPurgeMessage(
    most_recent_step,
    most_recent_wall_time,
    event_step,
    event_wall_time,
    num_expired_scalars,
    num_expired_histos,
    num_expired_comp_histos,
    num_expired_images,
    num_expired_audio,
):
    """Return the string message associated with TensorBoard purges."""
    return (
        "Detected out of order event.step likely caused by "
"a TensorFlow restart. Purging expired events from Tensorboard"
|
| 923 |
+
" display between the previous step: {} (timestamp: {}) and "
|
| 924 |
+
"current step: {} (timestamp: {}). Removing {} scalars, {} "
|
| 925 |
+
"histograms, {} compressed histograms, {} images, "
|
| 926 |
+
"and {} audio."
|
| 927 |
+
).format(
|
| 928 |
+
most_recent_step,
|
| 929 |
+
most_recent_wall_time,
|
| 930 |
+
event_step,
|
| 931 |
+
event_wall_time,
|
| 932 |
+
num_expired_scalars,
|
| 933 |
+
num_expired_histos,
|
| 934 |
+
num_expired_comp_histos,
|
| 935 |
+
num_expired_images,
|
| 936 |
+
num_expired_audio,
|
| 937 |
+
)
|
| 938 |
+
|
| 939 |
+
|
| 940 |
+
def _GeneratorFromPath(path):
|
| 941 |
+
"""Create an event generator for file or directory at given path string."""
|
| 942 |
+
if not path:
|
| 943 |
+
raise ValueError("path must be a valid string")
|
| 944 |
+
if io_wrapper.IsSummaryEventsFile(path):
|
| 945 |
+
return event_file_loader.LegacyEventFileLoader(path)
|
| 946 |
+
else:
|
| 947 |
+
return directory_watcher.DirectoryWatcher(
|
| 948 |
+
path,
|
| 949 |
+
event_file_loader.LegacyEventFileLoader,
|
| 950 |
+
io_wrapper.IsSummaryEventsFile,
|
| 951 |
+
)
|
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_inspector.py
ADDED
@@ -0,0 +1,465 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Logic for TensorBoard inspector to help humans investigate event files.

Example usages:
tensorboard --inspect --event_file myevents.out
tensorboard --inspect --event_file myevents.out --tag loss
tensorboard --inspect --logdir mylogdir
tensorboard --inspect --logdir mylogdir --tag loss


This script runs over a logdir and creates an InspectionUnit for every
subdirectory with event files. If running over an event file, it creates only
one InspectionUnit. One block of output is printed to console for each
InspectionUnit.

The primary content of an InspectionUnit is the dict field_to_obs that maps
fields (e.g. "scalar", "histogram", "session_log:start", etc.) to a list of
Observations for the field. Observations correspond one-to-one with Events in
an event file but contain less information because they only store what is
necessary to generate the final console output.

The final output is rendered to console by applying some aggregating function
to the lists of Observations. Different functions are applied depending on the
type of field. For instance, for "scalar" fields, the inspector shows aggregate
statistics. For other fields like "session_log:start", all observed steps are
printed in order to aid debugging.


[1] Query a logdir or an event file for its logged tags and summary statistics
using --logdir or --event_file.

[[event_file]] contains these tags:
histograms
   binary/Sign/Activations
   binary/nn_tanh/act/Activations
   binary/nn_tanh/biases
   binary/nn_tanh/biases:gradient
   binary/nn_tanh/weights
   binary/nn_tanh/weights:gradient
images
   input_images/image/0
   input_images/image/1
   input_images/image/2
scalars
   Learning Rate
   Total Cost
   Total Cost (raw)

Debug output aggregated over all tags:
graph
   first_step           0
   last_step            0
   max_step             0
   min_step             0
   num_steps            1
   outoforder_steps     []
histograms
   first_step           491
   last_step            659823
   max_step             659823
   min_step             491
   num_steps            993
   outoforder_steps     []
images -
scalars
   first_step           0
   last_step            659823
   max_step             659823
   min_step             0
   num_steps            1985
   outoforder_steps     []
sessionlog:checkpoint
   first_step           7129
   last_step            657167
   max_step             657167
   min_step             7129
   num_steps            99
   outoforder_steps     []
sessionlog:start
   outoforder_steps     []
   steps                [0L]
sessionlog:stop -


[2] Drill down into a particular tag using --tag.

Debug output for binary/Sign/Activations:
histograms
   first_step           491
   last_step            659823
   max_step             659823
   min_step             491
   num_steps            993
   outoforder_steps     []
"""


import dataclasses
import itertools
import os

from typing import Any, Generator, Mapping

from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.compat import tf
from tensorboard.compat.proto import event_pb2


# Map of field names within summary.proto to the user-facing names that this
# script outputs.
SUMMARY_TYPE_TO_FIELD = {
    "simple_value": "scalars",
    "histo": "histograms",
    "image": "images",
    "audio": "audio",
}
for summary_type in event_accumulator.SUMMARY_TYPES:
    if summary_type not in SUMMARY_TYPE_TO_FIELD:
        SUMMARY_TYPE_TO_FIELD[summary_type] = summary_type

# Types of summaries that we may want to query for by tag.
TAG_FIELDS = list(SUMMARY_TYPE_TO_FIELD.values())

# Summaries that we want to see every instance of.
LONG_FIELDS = ["sessionlog:start", "sessionlog:stop"]

# Summaries that we only want an abridged digest of, since they would
# take too much screen real estate otherwise.
SHORT_FIELDS = ["graph", "sessionlog:checkpoint"] + TAG_FIELDS

# All summary types that we can inspect.
TRACKED_FIELDS = SHORT_FIELDS + LONG_FIELDS

PRINT_SEPARATOR = "=" * 70 + "\n"


@dataclasses.dataclass(frozen=True)
class Observation:
    """Contains the data within each Event file that the inspector cares about.

    The inspector accumulates Observations as it processes events.

    Attributes:
      step: Global step of the event.
      wall_time: Timestamp of the event in seconds.
      tag: Tag name associated with the event.
    """

    step: int
    wall_time: float
    tag: str


@dataclasses.dataclass(frozen=True)
class InspectionUnit:
    """Created for each organizational structure in the event files.

    An InspectionUnit is visible in the final terminal output. For instance,
    one InspectionUnit is created for each subdirectory in logdir. When asked
    to inspect a single event file, there may only be one InspectionUnit.

    Attributes:
      name: Name of the organizational unit that will be printed to console.
      generator: A generator that yields `Event` protos.
      field_to_obs: A mapping from string fields to `Observations` that the
        inspector creates.
    """

    name: str
    generator: Generator[event_pb2.Event, Any, Any]
    field_to_obs: Mapping[str, Observation]


def get_field_to_observations_map(generator, query_for_tag=""):
    """Return a field to `Observations` dict for the event generator.

    Args:
      generator: A generator over event protos.
      query_for_tag: A string; if specified, only create observations for
        events with this tag name.

    Returns:
      A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
    """

    def increment(stat, event, tag=""):
        assert stat in TRACKED_FIELDS
        field_to_obs[stat].append(
            dataclasses.asdict(
                Observation(step=event.step, wall_time=event.wall_time, tag=tag)
            )
        )

    field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])

    for event in generator:
        ## Process the event
        if event.HasField("graph_def") and (not query_for_tag):
            increment("graph", event)
        if event.HasField("session_log") and (not query_for_tag):
            status = event.session_log.status
            if status == event_pb2.SessionLog.START:
                increment("sessionlog:start", event)
            elif status == event_pb2.SessionLog.STOP:
                increment("sessionlog:stop", event)
            elif status == event_pb2.SessionLog.CHECKPOINT:
                increment("sessionlog:checkpoint", event)
        elif event.HasField("summary"):
            for value in event.summary.value:
                if query_for_tag and value.tag != query_for_tag:
                    continue

                for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
                    if value.HasField(proto_name):
                        increment(display_name, event, value.tag)
    return field_to_obs


def get_unique_tags(field_to_obs):
    """Returns a dictionary of tags that a user could query over.

    Args:
      field_to_obs: Dict that maps string field to `Observation` list.

    Returns:
      A dict that maps keys in `TAG_FIELDS` to a list of string tags present
      in the event files. If the dict does not have any observations of the
      type, maps to an empty list so that we can render this to console.
    """
    return {
        field: sorted(set([x.get("tag", "") for x in observations]))
        for field, observations in field_to_obs.items()
        if field in TAG_FIELDS
    }


def print_dict(d, show_missing=True):
    """Prints a shallow dict to console.

    Args:
      d: Dict to print.
      show_missing: Whether to show keys with empty values.
    """
    for k, v in sorted(d.items()):
        if (not v) and show_missing:
            # No instances of the key, so print missing symbol.
            print("{} -".format(k))
        elif isinstance(v, list):
            # Value is a list, so print each item of the list.
            print(k)
            for item in v:
                print("   {}".format(item))
        elif isinstance(v, dict):
            # Value is a dict, so print each (key, value) pair of the dict.
            print(k)
            for kk, vv in sorted(v.items()):
                print("   {:<20} {}".format(kk, vv))


def get_dict_to_print(field_to_obs):
    """Transform the field-to-obs mapping into a printable dictionary.

    Args:
      field_to_obs: Dict that maps string field to `Observation` list.

    Returns:
      A dict with the keys and values to print to console.
    """

    def compressed_steps(steps):
        return {
            "num_steps": len(set(steps)),
            "min_step": min(steps),
            "max_step": max(steps),
            "last_step": steps[-1],
            "first_step": steps[0],
            "outoforder_steps": get_out_of_order(steps),
        }

    def full_steps(steps):
        return {"steps": steps, "outoforder_steps": get_out_of_order(steps)}

    output = {}
    for field, observations in field_to_obs.items():
        if not observations:
            output[field] = None
            continue

        steps = [x["step"] for x in observations]
        if field in SHORT_FIELDS:
            output[field] = compressed_steps(steps)
        if field in LONG_FIELDS:
            output[field] = full_steps(steps)

    return output
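
# Illustrative digest entry (hypothetical steps, not in the original source):
# for a SHORT_FIELDS field observed at steps [0, 5, 10], compressed_steps()
# above produces {"num_steps": 3, "min_step": 0, "max_step": 10,
# "last_step": 10, "first_step": 0, "outoforder_steps": []}.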


def get_out_of_order(list_of_numbers):
    """Returns elements that break the monotonically non-decreasing trend.

    This is used to find instances of global step values that are
    "out-of-order", which may trigger TensorBoard event discarding logic.

    Args:
      list_of_numbers: A list of numbers.

    Returns:
      A list of tuples; each tuple contains two adjacent elements of the
        input list where the second element is lower than the first.
"""
|
| 327 |
+
# TODO: Consider changing this to only check for out-of-order
|
| 328 |
+
# steps within a particular tag.
|
| 329 |
+
result = []
|
| 330 |
+
# pylint: disable=consider-using-enumerate
|
| 331 |
+
for i in range(len(list_of_numbers)):
|
| 332 |
+
if i == 0:
|
| 333 |
+
continue
|
| 334 |
+
if list_of_numbers[i] < list_of_numbers[i - 1]:
|
| 335 |
+
result.append((list_of_numbers[i - 1], list_of_numbers[i]))
|
| 336 |
+
return result
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
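
# For example (illustrative, not in the original source):
# get_out_of_order([0, 1, 2, 1, 3]) returns [(2, 1)], the adjacent pair
# where the step sequence dips.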


def generators_from_logdir(logdir):
    """Returns a list of event generators for subdirectories with event files.

    The number of generators returned should equal the number of directories
    within logdir that contain event files. If only logdir contains event
    files, returns a list of length one.

    Args:
      logdir: A log directory that contains event files.

    Returns:
      List of event generators for each subdirectory with event files.
    """
    subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
    generators = [
        itertools.chain(
            *[
                generator_from_event_file(os.path.join(subdir, f))
                for f in tf.io.gfile.listdir(subdir)
                if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
            ]
        )
        for subdir in subdirs
    ]
    return generators


def generator_from_event_file(event_file):
    """Returns a generator that yields events from an event file."""
    return event_file_loader.LegacyEventFileLoader(event_file).Load()


def get_inspection_units(logdir="", event_file="", tag=""):
    """Returns a list of InspectionUnit objects given either logdir or
    event_file.

    If logdir is given, the number of InspectionUnits should equal the
    number of directories or subdirectories that contain event files.

    If event_file is given, the number of InspectionUnits should be 1.

    Args:
      logdir: A log directory that contains event files.
      event_file: Or, a particular event file path.
      tag: An optional tag name to query for.

    Returns:
      A list of InspectionUnit objects.
    """
    if logdir:
        subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
        inspection_units = []
        for subdir in subdirs:
            generator = itertools.chain(
                *[
                    generator_from_event_file(os.path.join(subdir, f))
                    for f in tf.io.gfile.listdir(subdir)
                    if io_wrapper.IsTensorFlowEventsFile(
                        os.path.join(subdir, f)
                    )
                ]
            )
            inspection_units.append(
                InspectionUnit(
                    name=subdir,
                    generator=generator,
                    field_to_obs=get_field_to_observations_map(generator, tag),
                )
            )
        if inspection_units:
            print(
                "Found event files in:\n{}\n".format(
                    "\n".join([u.name for u in inspection_units])
                )
            )
        elif io_wrapper.IsTensorFlowEventsFile(logdir):
            print(
                "It seems that {} may be an event file instead of a logdir. If this "
                "is the case, use --event_file instead of --logdir to pass "
                "it in.".format(logdir)
            )
        else:
            print("No event files found within logdir {}".format(logdir))
        return inspection_units
    elif event_file:
        generator = generator_from_event_file(event_file)
        return [
            InspectionUnit(
                name=event_file,
                generator=generator,
                field_to_obs=get_field_to_observations_map(generator, tag),
            )
        ]
    return []


def inspect(logdir="", event_file="", tag=""):
    """Main function for inspector that prints out a digest of event files.

    Args:
      logdir: A log directory that contains event files.
      event_file: Or, a particular event file path.
      tag: An optional tag name to query for.

    Raises:
      ValueError: If neither logdir nor event_file is given, or if both are
        given.
"""
|
| 446 |
+
print(
|
| 447 |
+
PRINT_SEPARATOR
|
| 448 |
+
+ "Processing event files... (this can take a few minutes)\n"
|
| 449 |
+
+ PRINT_SEPARATOR
|
| 450 |
+
)
|
| 451 |
+
inspection_units = get_inspection_units(logdir, event_file, tag)
|
| 452 |
+
|
| 453 |
+
for unit in inspection_units:
|
| 454 |
+
if tag:
|
| 455 |
+
print("Event statistics for tag {} in {}:".format(tag, unit.name))
|
| 456 |
+
else:
|
| 457 |
+
# If the user is not inspecting a particular tag, also print the list of
|
| 458 |
+
# all available tags that they can query.
|
| 459 |
+
print("These tags are in {}:".format(unit.name))
|
| 460 |
+
print_dict(get_unique_tags(unit.field_to_obs))
|
| 461 |
+
print(PRINT_SEPARATOR)
|
| 462 |
+
print("Event statistics for {}:".format(unit.name))
|
| 463 |
+
|
| 464 |
+
print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
|
| 465 |
+
print(PRINT_SEPARATOR)
|
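
A quick sketch of driving the inspector programmatically rather than via the CLI flags shown in the module docstring (the logdir path and the "loss" tag are hypothetical):

    from tensorboard.backend.event_processing import event_file_inspector

    # Equivalent to: tensorboard --inspect --logdir runs/
    event_file_inspector.inspect(logdir="runs/")
    # Or drill into one tag of a single event file:
    event_file_inspector.inspect(event_file="myevents.out", tag="loss")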
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_file_loader.py
ADDED
@@ -0,0 +1,293 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functionality for loading events from a record file."""

import contextlib

from tensorboard import data_compat
from tensorboard import dataclass_compat
from tensorboard.compat import tf
from tensorboard.compat.proto import event_pb2
from tensorboard.util import platform_util
from tensorboard.util import tb_logging


logger = tb_logging.get_logger()


@contextlib.contextmanager
def _nullcontext():
    """Pre-Python-3.7-compatible standin for contextlib.nullcontext."""
    yield


# Might as well make this a singleton.
_NULLCONTEXT = _nullcontext()


def _silence_deprecation_warnings():
    """Context manager that best-effort silences TF deprecation warnings."""
    try:
        # Learn this one weird trick to make TF deprecation warnings go away.
        from tensorflow.python.util import deprecation

        return deprecation.silence()
    except (ImportError, AttributeError):
        return _NULLCONTEXT


def _make_tf_record_iterator(file_path):
    """Returns an iterator over TF records for the given tfrecord file."""
    # If we don't have TF at all, use the stub implementation.
    if tf.__version__ == "stub":
        # TODO(#1711): Reshape stub implementation to fit tf_record_iterator API
        # rather than needlessly emulating the old PyRecordReader_New API.
        logger.debug("Opening a stub record reader pointing at %s", file_path)
        return _PyRecordReaderIterator(
            tf.pywrap_tensorflow.PyRecordReader_New, file_path
        )
    # If PyRecordReader exists, use it, otherwise use tf_record_iterator().
    # Check old first, then new, since tf_record_iterator existed previously but
    # only gained the semantics we need at the time PyRecordReader was removed.
    #
    # TODO(#1711): Eventually remove PyRecordReader fallback once we can drop
    # support for TF 2.1 and prior, and find a non-deprecated replacement for
    # tf.compat.v1.io.tf_record_iterator.
    try:
        from tensorflow.python import pywrap_tensorflow

        py_record_reader_new = pywrap_tensorflow.PyRecordReader_New
    except (ImportError, AttributeError):
        py_record_reader_new = None
    if py_record_reader_new:
        logger.debug("Opening a PyRecordReader pointing at %s", file_path)
        return _PyRecordReaderIterator(py_record_reader_new, file_path)
    else:
        logger.debug("Opening a tf_record_iterator pointing at %s", file_path)
        # TODO(#1711): Find non-deprecated replacement for tf_record_iterator.
        with _silence_deprecation_warnings():
            return tf.compat.v1.io.tf_record_iterator(file_path)


class _PyRecordReaderIterator:
    """Python iterator for TF Records based on PyRecordReader."""

    def __init__(self, py_record_reader_new, file_path):
        """Constructs a _PyRecordReaderIterator for the given file path.

        Args:
          py_record_reader_new: pywrap_tensorflow.PyRecordReader_New
          file_path: file path of the tfrecord file to read
        """
        with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
            self._reader = py_record_reader_new(
                tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(""), status
            )
        if not self._reader:
            raise IOError(
                "Failed to open a record reader pointing to %s" % file_path
            )

    def __iter__(self):
        return self

    def __next__(self):
        try:
            self._reader.GetNext()
        except tf.errors.OutOfRangeError:
            raise StopIteration
        return self._reader.record()

    next = __next__  # for python2 compatibility


class RawEventFileLoader:
    """An iterator that yields Event protos as serialized bytestrings."""

    def __init__(self, file_path, detect_file_replacement=False):
        """Constructs a RawEventFileLoader for the given file path.

        Args:
          file_path: the event file path to read from
          detect_file_replacement: if True, when Load() is called, the loader
            will make a stat() call to check the size of the file. If it sees
            that the file has grown, it will reopen the file entirely (while
            preserving the current offset) before attempting to read from it.
            Otherwise, Load() will simply poll at EOF for new data.
        """
        if file_path is None:
            raise ValueError("A file path is required")
        self._file_path = platform_util.readahead_file_path(file_path)
        self._detect_file_replacement = detect_file_replacement
        self._file_size = None
        self._iterator = _make_tf_record_iterator(self._file_path)
        if self._detect_file_replacement and not hasattr(
            self._iterator, "reopen"
        ):
            logger.warning(
                "File replacement detection requested, but not enabled because "
                "TF record iterator impl does not support reopening. This "
                "functionality requires TensorFlow 2.9+"
            )
            self._detect_file_replacement = False

    def Load(self):
        """Loads all new events from disk as raw serialized proto bytestrings.

        Calling Load multiple times in a row will not 'drop' events as long as
        the return value is not iterated over.

        Yields:
          All event proto bytestrings in the file that have not been yielded
          yet.
        """
        logger.debug("Loading events from %s", self._file_path)
        if self._detect_file_replacement:
            has_increased = self.CheckForIncreasedFileSize()
            # Only act on the file size information if we got a concrete result.
            if has_increased is not None:
                if has_increased:
                    logger.debug(
                        "Reopening %s since file size has changed",
                        self._file_path,
                    )
                    self._iterator.close()
                    self._iterator.reopen()
                else:
                    logger.debug(
                        "Skipping attempt to poll %s since file size has not "
                        "changed (still %d)",
                        self._file_path,
                        self._file_size,
                    )
                    return
        while True:
            try:
                yield next(self._iterator)
            except StopIteration:
                logger.debug("End of file in %s", self._file_path)
                break
            except tf.errors.DataLossError as e:
                # We swallow partial read exceptions; if the record was truncated
                # and a later update completes it, retrying can then resume from
                # the same point in the file since the iterator holds the offset.
                logger.debug("Truncated record in %s (%s)", self._file_path, e)
                break
        logger.debug("No more events in %s", self._file_path)

    def CheckForIncreasedFileSize(self):
        """Stats the file to get its updated size, returning True if it grew.

        If the stat call fails or reports a smaller size than was previously
        seen, then any previously cached size is left unchanged.

        Returns:
          boolean or None: True if the file size increased; False if it was
          the same or decreased; or None if neither case could be detected
          (either because the previous size had not been recorded yet, or
          because the stat call for the current size failed).
        """
        previous_size = self._file_size
        try:
            self._file_size = tf.io.gfile.stat(self._file_path).length
        except tf.errors.OpError as e:
            logger.error("Failed to stat %s: %s", self._file_path, e)
            return None
        logger.debug(
            "Stat on %s got size %d, previous size %s",
            self._file_path,
            self._file_size,
            previous_size,
        )
        if previous_size is None:
            return None
        if self._file_size > previous_size:
            return True
        if self._file_size < previous_size:
            logger.warning(
                "File %s shrank from previous size %d to size %d",
                self._file_path,
                previous_size,
                self._file_size,
            )
            # In case this was transient, preserve the previously cached size,
            # to avoid reporting a spurious increase next time. If the file was
            # actually truncated, we can't recover anyway, so just ignore it.
            self._file_size = previous_size
        return False
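
# Illustrative CheckForIncreasedFileSize() behavior (hypothetical sizes, not
# in the original source): the first call records the current size and
# returns None (no baseline yet); a later call that sees a larger size
# returns True, which makes Load() reopen the file; a call that sees the
# same size returns False.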

class LegacyEventFileLoader(RawEventFileLoader):
    """An iterator that yields parsed Event protos."""

    def Load(self):
        """Loads all new events from disk.

        Calling Load multiple times in a row will not 'drop' events as long as
        the return value is not iterated over.

        Yields:
          All events in the file that have not been yielded yet.
        """
        for record in super().Load():
            yield event_pb2.Event.FromString(record)


class EventFileLoader(LegacyEventFileLoader):
    """An iterator that passes events through read-time compat layers.

    Specifically, this includes `data_compat` and `dataclass_compat`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track initial metadata for each tag, for `dataclass_compat`.
        # This is meant to be tracked per run, not per event file, so
        # there is a potential failure case when the second event file
        # in a single run has no summary metadata. This only occurs when
        # all of the following hold: (a) the events were written with
        # the TensorFlow 1.x (not 2.x) writer, (b) the summaries were
        # created by `tensorboard.summary.v1` ops and so do not undergo
        # `data_compat` transformation, and (c) the file writer was
        # reopened by calling `.reopen()` on it, which creates a new
        # file but does not clear the tag cache. This is considered
        # sufficiently improbable that we don't take extra mitigations.
        self._initial_metadata = {}  # from tag name to `SummaryMetadata`

    def Load(self):
        for event in super().Load():
            event = data_compat.migrate_event(event)
            events = dataclass_compat.migrate_event(
                event, self._initial_metadata
            )
            for event in events:
                yield event


class TimestampedEventFileLoader(EventFileLoader):
    """An iterator that yields (UNIX timestamp float, Event proto) pairs."""

    def Load(self):
        """Loads all new events and their wall time values from disk.

        Calling Load multiple times in a row will not 'drop' events as long as
        the return value is not iterated over.

        Yields:
          Pairs of (UNIX timestamp float, Event proto) for all events in the
          file that have not been yielded yet.
        """
        for event in super().Load():
            yield (event.wall_time, event)
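
A short sketch of the incremental Load() contract implemented above (the file name is hypothetical): each call yields only records appended since the previous iteration, because the underlying record iterator keeps its file offset.

    loader = EventFileLoader("events.out.tfevents.123")
    for event in loader.Load():  # everything written so far
        print(event.step)
    # ... the writer appends more records ...
    for event in loader.Load():  # only the newly appended events
        print(event.step)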
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_multiplexer.py
ADDED
@@ -0,0 +1,523 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""


import os
import threading

from typing import Optional

from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.util import tb_logging


logger = tb_logging.get_logger()


class EventMultiplexer:
    """An `EventMultiplexer` manages access to multiple `EventAccumulator`s.

    Each `EventAccumulator` is associated with a `run`, which is a self-contained
    TensorFlow execution. The `EventMultiplexer` provides methods for extracting
    information about events from multiple `run`s.

    Example usage for loading specific runs from files:

    ```python
    x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
    x.Reload()
    ```

    Example usage for loading a directory where each subdirectory is a run:

    ```python
    (e.g.:) /parent/directory/path/
            /parent/directory/path/run1/
            /parent/directory/path/run1/events.out.tfevents.1001
            /parent/directory/path/run1/events.out.tfevents.1002

            /parent/directory/path/run2/
            /parent/directory/path/run2/events.out.tfevents.9232

            /parent/directory/path/run3/
            /parent/directory/path/run3/events.out.tfevents.9232
    x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
    (which is equivalent to:)
    x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2': ...})
    ```

    If you would like to watch `/parent/directory/path`, wait for it to be created
    (if necessary) and then periodically pick up new runs, use
    `AutoloadingMultiplexer`.

    @@Tensors
    """

    def __init__(
        self, run_path_map=None, size_guidance=None, purge_orphaned_data=True
    ):
        """Constructor for the `EventMultiplexer`.

        Args:
          run_path_map: Dict `{run: path}` which specifies the
            name of a run, and the path to find the associated events. If it is
            None, then the EventMultiplexer initializes without any runs.
          size_guidance: A dictionary mapping from `tagType` to the number of items
            to store for each tag of that type. See
            `event_accumulator.EventAccumulator` for details.
          purge_orphaned_data: Whether to discard any events that were "orphaned" by
            a TensorFlow restart.
        """
        logger.info("Event Multiplexer initializing.")
        self._accumulators_mutex = threading.Lock()
        self._accumulators = {}
        self._paths = {}
        self._reload_called = False
        self._size_guidance = (
            size_guidance or event_accumulator.DEFAULT_SIZE_GUIDANCE
        )
        self.purge_orphaned_data = purge_orphaned_data
        if run_path_map is not None:
            logger.info(
                "Event Multiplexer doing initialization load for %s",
                run_path_map,
            )
            for run, path in run_path_map.items():
                self.AddRun(path, run)
        logger.info("Event Multiplexer done initializing")

    def AddRun(self, path, name=None):
        """Add a run to the multiplexer.

        If the name is not specified, it is the same as the path.

        If a run by that name exists, and we are already watching the right path,
        do nothing. If we are watching a different path, replace the event
        accumulator.

        If `Reload` has been called, it will `Reload` the newly created
        accumulators.

        Args:
          path: Path to the event files (or event directory) for given run.
          name: Name of the run to add. If not provided, is set to path.

        Returns:
          The `EventMultiplexer`.
        """
        name = name or path
        accumulator = None
        with self._accumulators_mutex:
            if name not in self._accumulators or self._paths[name] != path:
                if name in self._paths and self._paths[name] != path:
                    # TODO(@decentralion) - Make it impossible to overwrite an old path
                    # with a new path (just give the new path a distinct name)
                    logger.warning(
                        "Conflict for name %s: old path %s, new path %s",
                        name,
                        self._paths[name],
                        path,
                    )
                logger.info("Constructing EventAccumulator for %s", path)
                accumulator = event_accumulator.EventAccumulator(
                    path,
                    size_guidance=self._size_guidance,
                    purge_orphaned_data=self.purge_orphaned_data,
                )
                self._accumulators[name] = accumulator
                self._paths[name] = path
        if accumulator:
            if self._reload_called:
                accumulator.Reload()
        return self

    def AddRunsFromDirectory(self, path, name=None):
        """Load runs from a directory; recursively walks subdirectories.

        If path doesn't exist, no-op. This ensures that it is safe to call
        `AddRunsFromDirectory` multiple times, even before the directory is made.

        If path is a directory, load event files in the directory (if any exist) and
        recursively call AddRunsFromDirectory on any subdirectories. This means you
        can call AddRunsFromDirectory at the root of a tree of event logs and
        TensorBoard will load them all.

        If the `EventMultiplexer` is already loaded this will cause
        the newly created accumulators to `Reload()`.

        Args:
          path: A string path to a directory to load runs from.
          name: Optionally, what name to apply to the runs. If name is provided
            and the directory contains run subdirectories, the name of each subrun
            is the concatenation of the parent name and the subdirectory name. If
            name is provided and the directory contains event files, then a run
            is added called "name" and with the events from the path.

        Raises:
          ValueError: If the path exists and isn't a directory.

        Returns:
          The `EventMultiplexer`.
        """
        logger.info("Starting AddRunsFromDirectory: %s", path)
        for subdir in io_wrapper.GetLogdirSubdirectories(path):
            logger.info("Adding events from directory %s", subdir)
            rpath = os.path.relpath(subdir, path)
            subname = os.path.join(name, rpath) if name else rpath
            self.AddRun(subdir, name=subname)
        logger.info("Done with AddRunsFromDirectory: %s", path)
        return self

    def Reload(self):
        """Call `Reload` on every `EventAccumulator`."""
        logger.info("Beginning EventMultiplexer.Reload()")
        self._reload_called = True
        # Build a list so we're safe even if the list of accumulators is
        # modified while we're reloading.
        with self._accumulators_mutex:
            items = list(self._accumulators.items())

        names_to_delete = set()
        for name, accumulator in items:
            try:
                accumulator.Reload()
            except (OSError, IOError) as e:
                logger.error("Unable to reload accumulator '%s': %s", name, e)
            except directory_watcher.DirectoryDeletedError:
                names_to_delete.add(name)

        with self._accumulators_mutex:
            for name in names_to_delete:
                logger.warning("Deleting accumulator '%s'", name)
                del self._accumulators[name]
        logger.info("Finished with EventMultiplexer.Reload()")
        return self

    def PluginAssets(self, plugin_name):
        """Get index of runs and assets for a given plugin.

        Args:
          plugin_name: Name of the plugin we are checking for.

        Returns:
          A dictionary that maps from run_name to a list of plugin
          assets for that run.
        """
        with self._accumulators_mutex:
            # To avoid nested locks, we construct a copy of the run-accumulator map
            items = list(self._accumulators.items())

        return {run: accum.PluginAssets(plugin_name) for run, accum in items}

    def RetrievePluginAsset(self, run, plugin_name, asset_name):
        """Return the contents for a specific plugin asset from a run.

        Args:
          run: The string name of the run.
          plugin_name: The string name of a plugin.
          asset_name: The string name of an asset.

        Returns:
          The string contents of the plugin asset.

        Raises:
          KeyError: If the asset is not available.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.RetrievePluginAsset(plugin_name, asset_name)

    def FirstEventTimestamp(self, run):
        """Return the timestamp of the first event of the given run.

        This may perform I/O if no events have been loaded yet for the run.

        Args:
          run: A string name of the run for which the timestamp is retrieved.

        Returns:
          The wall_time of the first event of the run, which will typically be
          seconds since the epoch.

        Raises:
          KeyError: If the run is not found.
          ValueError: If the run has no events loaded and there are no events on
            disk to load.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.FirstEventTimestamp()

    def GetSourceWriter(self, run) -> Optional[str]:
        """Returns the source writer name from the first event of the given run.

        Assumes each run has only one source writer.

        Args:
          run: A string name of the run from which the event source information
            is retrieved.

        Returns:
          Name of the writer that wrote the events in the run.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.GetSourceWriter()

    def Scalars(self, run, tag):
        """Retrieve the scalar events associated with a run and tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.ScalarEvents`.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Scalars(tag)

    def Graph(self, run):
        """Retrieve the graph associated with the provided run.

        Args:
          run: A string name of a run to load the graph for.

        Raises:
          KeyError: If the run is not found.
          ValueError: If the run does not have an associated graph.

        Returns:
          The `GraphDef` protobuf data structure.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Graph()

    def SerializedGraph(self, run):
        """Retrieve the serialized graph associated with the provided run.

        Args:
          run: A string name of a run to load the graph for.

        Raises:
          KeyError: If the run is not found.
          ValueError: If the run does not have an associated graph.

        Returns:
          The serialized form of the `GraphDef` protobuf data structure.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.SerializedGraph()

    def MetaGraph(self, run):
        """Retrieve the metagraph associated with the provided run.

        Args:
          run: A string name of a run to load the graph for.

        Raises:
          KeyError: If the run is not found.
          ValueError: If the run does not have an associated graph.

        Returns:
          The `MetaGraphDef` protobuf data structure.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.MetaGraph()

    def RunMetadata(self, run, tag):
        """Get the session.run() metadata associated with a TensorFlow run and
        tag.

        Args:
          run: A string name of a TensorFlow run.
          tag: A string name of the tag associated with a particular session.run().

        Raises:
          KeyError: If the run is not found, or the tag is not available for the
            given run.

        Returns:
          The metadata in the form of `RunMetadata` protobuf data structure.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.RunMetadata(tag)

    def Histograms(self, run, tag):
        """Retrieve the histogram events associated with a run and tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.HistogramEvents`.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Histograms(tag)

    def CompressedHistograms(self, run, tag):
        """Retrieve the compressed histogram events associated with a run and
        tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.CompressedHistogramEvents`.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.CompressedHistograms(tag)

    def Images(self, run, tag):
        """Retrieve the image events associated with a run and tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.ImageEvents`.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Images(tag)

    def Audio(self, run, tag):
        """Retrieve the audio events associated with a run and tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.AudioEvents`.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Audio(tag)

    def Tensors(self, run, tag):
        """Retrieve the tensor events associated with a run and tag.

        Args:
          run: A string name of the run for which values are retrieved.
          tag: A string name of the tag for which values are retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          An array of `event_accumulator.TensorEvent`s.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.Tensors(tag)

    def PluginRunToTagToContent(self, plugin_name):
        """Returns a 2-layer dictionary of the form {run: {tag: content}}.

        The `content` referred to above is the content field of the PluginData
        proto for the specified plugin within a Summary.Value proto.

        Args:
          plugin_name: The name of the plugin for which to fetch content.

        Returns:
          A dictionary of the form {run: {tag: content}}.
        """
        mapping = {}
        for run in self.Runs():
            try:
                tag_to_content = self.GetAccumulator(run).PluginTagToContent(
                    plugin_name
                )
            except KeyError:
                # This run lacks content for the plugin. Try the next run.
                continue
            mapping[run] = tag_to_content
        return mapping

    def SummaryMetadata(self, run, tag):
        """Return the summary metadata for the given tag on the given run.

        Args:
          run: A string name of the run for which summary metadata is to be
            retrieved.
          tag: A string name of the tag whose summary metadata is to be
            retrieved.

        Raises:
          KeyError: If the run is not found, or the tag is not available for
            the given run.

        Returns:
          A `SummaryMetadata` protobuf.
        """
        accumulator = self.GetAccumulator(run)
        return accumulator.SummaryMetadata(tag)

    def Runs(self):
        """Return all the run names in the `EventMultiplexer`.

        Returns:
        ```
        {runName: { images: [tag1, tag2, tag3],
                    scalarValues: [tagA, tagB, tagC],
                    histograms: [tagX, tagY, tagZ],
                    compressedHistograms: [tagX, tagY, tagZ],
                    graph: true, meta_graph: true}}
        ```
        """
        with self._accumulators_mutex:
            # To avoid nested locks, we construct a copy of the run-accumulator map
            items = list(self._accumulators.items())
        return {run_name: accumulator.Tags() for run_name, accumulator in items}

    def RunPaths(self):
        """Returns a dict mapping run names to event file paths."""
        return self._paths

    def GetAccumulator(self, run):
        """Returns EventAccumulator for a given run.

        Args:
          run: String name of run.

        Returns:
          An EventAccumulator object.

        Raises:
          KeyError: If run does not exist.
        """
        with self._accumulators_mutex:
            return self._accumulators[run]
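A minimal usage sketch of the multiplexer above. The log-directory layout and the `experiment_a` prefix are hypothetical placeholders; the run-naming behavior (`os.path.join(name, relative_subdir)`) follows directly from `AddRunsFromDirectory`.

```python
from tensorboard.backend.event_processing.event_multiplexer import (
    EventMultiplexer,
)

LOGDIR = "/tmp/logs"  # hypothetical: contains run1/, run2/ subdirectories

# Each subdirectory containing event files becomes a run; with a `name`
# prefix, run names become os.path.join(name, relative_subdir).
x = EventMultiplexer().AddRunsFromDirectory(LOGDIR, name="experiment_a")
x.Reload()           # calls Reload() on every accumulator
print(x.Runs())      # e.g. {'experiment_a/run1': {...tags...}, ...}
print(x.RunPaths())  # run name -> event file path mapping
```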
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/event_util.py
ADDED
@@ -0,0 +1,68 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functionality for processing events."""

from typing import Optional

from tensorboard.compat.proto import event_pb2
from tensorboard.util import tb_logging

logger = tb_logging.get_logger()

# Maximum length for event writer name.
_MAX_WRITER_NAME_LEN = 128


def ParseFileVersion(file_version: str) -> float:
    """Convert the string file_version in event.proto into a float.

    Args:
      file_version: String file_version from event.proto

    Returns:
      Version number as a float.
    """
    tokens = file_version.split("brain.Event:")
    try:
        return float(tokens[-1])
    except ValueError:
        ## This should never happen according to the definition of
        ## file_version specified in event.proto.
        logger.warning(
            "Invalid event.proto file_version. Defaulting to use of "
            "out-of-order event.step logic for purging expired events."
        )
        return -1


def GetSourceWriter(
    source_metadata: event_pb2.SourceMetadata,
) -> Optional[str]:
    """Gets the source writer name from the source metadata proto."""
    writer_name = source_metadata.writer
    if not writer_name:
        return None
    # Checks the length of the writer name.
    if len(writer_name) > _MAX_WRITER_NAME_LEN:
        logger.error(
            "Source writer name `%s` is too long, maximum allowed length is %d.",
            writer_name,
            _MAX_WRITER_NAME_LEN,
        )
        return None
    return writer_name
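The parsing rule above is simple enough to restate as a self-contained sketch: the version is whatever follows the literal prefix `"brain.Event:"`, and `-1` is the sentinel for unparseable input. This is a pure-Python restatement for illustration, not a replacement for the function above.

```python
def parse_file_version(file_version: str) -> float:
    # Mirrors ParseFileVersion's logic, minus the logging.
    tokens = file_version.split("brain.Event:")
    try:
        return float(tokens[-1])
    except ValueError:
        return -1  # same sentinel the real function returns


assert parse_file_version("brain.Event:2") == 2.0  # well-formed header
assert parse_file_version("not-a-version") == -1   # triggers the fallback
```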
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_asset_util.py
ADDED
@@ -0,0 +1,105 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Load plugin assets from disk."""

import os.path

from tensorboard.compat import tf


_PLUGINS_DIR = "plugins"


def _IsDirectory(parent, item):
    """Helper that returns whether parent/item is a directory."""
    return tf.io.gfile.isdir(os.path.join(parent, item))


def PluginDirectory(logdir, plugin_name):
    """Returns the plugin directory for plugin_name."""
    return os.path.join(logdir, _PLUGINS_DIR, plugin_name)


def ListPlugins(logdir):
    """List all the plugins that have registered assets in logdir.

    If the plugins_dir does not exist, it returns an empty list. This maintains
    compatibility with old directories that have no plugins written.

    Args:
      logdir: A directory that was created by a TensorFlow events writer.

    Returns:
      a list of plugin names, as strings
    """
    plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
    try:
        entries = tf.io.gfile.listdir(plugins_dir)
    except tf.errors.NotFoundError:
        return []
    # Strip trailing slashes, which listdir() includes for some filesystems
    # for subdirectories, after using them to bypass IsDirectory().
    return [
        x.rstrip("/")
        for x in entries
        if x.endswith("/") or _IsDirectory(plugins_dir, x)
    ]


def ListAssets(logdir, plugin_name):
    """List all the assets that are available for given plugin in a logdir.

    Args:
      logdir: A directory that was created by a TensorFlow summary.FileWriter.
      plugin_name: A string name of a plugin to list assets for.

    Returns:
      A string list of available plugin assets. If the plugin subdirectory does
      not exist (either because the logdir doesn't exist, or because the plugin
      didn't register) an empty list is returned.
    """
    plugin_dir = PluginDirectory(logdir, plugin_name)
    try:
        # Strip trailing slashes, which listdir() includes for some filesystems.
        return [x.rstrip("/") for x in tf.io.gfile.listdir(plugin_dir)]
    except tf.errors.NotFoundError:
        return []


def RetrieveAsset(logdir, plugin_name, asset_name):
    """Retrieve a particular plugin asset from a logdir.

    Args:
      logdir: A directory that was created by a TensorFlow summary.FileWriter.
      plugin_name: The plugin we want an asset from.
      asset_name: The name of the requested asset.

    Returns:
      string contents of the plugin asset.

    Raises:
      KeyError: if the asset does not exist.
    """

    asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
    try:
        with tf.io.gfile.GFile(asset_path, "r") as f:
            return f.read()
    except tf.errors.NotFoundError:
        raise KeyError("Asset path %s not found" % asset_path)
    except tf.errors.OpError as e:
        raise KeyError(
            "Couldn't read asset path: %s, OpError %s" % (asset_path, e)
        )
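A brief usage sketch of the three public helpers above. The logdir, plugin name, and asset name are hypothetical placeholders; per `PluginDirectory`, assets are expected under `<logdir>/plugins/<plugin_name>/`.

```python
from tensorboard.backend.event_processing import plugin_asset_util

LOGDIR = "/tmp/logs/run1"  # hypothetical

print(plugin_asset_util.ListPlugins(LOGDIR))             # e.g. ['projector']
print(plugin_asset_util.ListAssets(LOGDIR, "projector")) # asset names, or []
try:
    text = plugin_asset_util.RetrieveAsset(LOGDIR, "projector", "config.txt")
except KeyError:
    text = None  # asset missing, as documented above
```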
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/event_processing/plugin_event_accumulator.py
ADDED
@@ -0,0 +1,722 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""

import collections
import dataclasses
import threading

from typing import Optional

from tensorboard.backend.event_processing import directory_loader
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import event_util
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.backend.event_processing import plugin_asset_util
from tensorboard.backend.event_processing import reservoir
from tensorboard.backend.event_processing import tag_types
from tensorboard.compat.proto import config_pb2
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import graph_pb2
from tensorboard.compat.proto import meta_graph_pb2
from tensorboard.compat.proto import tensor_pb2
from tensorboard.util import tb_logging


logger = tb_logging.get_logger()


# Legacy aliases
TENSORS = tag_types.TENSORS
GRAPH = tag_types.GRAPH
META_GRAPH = tag_types.META_GRAPH
RUN_METADATA = tag_types.RUN_METADATA

DEFAULT_SIZE_GUIDANCE = {
    TENSORS: 500,
}

STORE_EVERYTHING_SIZE_GUIDANCE = {
    TENSORS: 0,
}

_TENSOR_RESERVOIR_KEY = "."  # arbitrary


@dataclasses.dataclass(frozen=True)
class TensorEvent:
    """A tensor event.

    Attributes:
      wall_time: Timestamp of the event in seconds.
      step: Global step of the event.
      tensor_proto: A `TensorProto`.
    """

    wall_time: float
    step: int
    tensor_proto: tensor_pb2.TensorProto

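# Illustrative sketch (hypothetical values): constructing a `TensorEvent`
# directly. Because the dataclass is declared frozen=True, instances are
# immutable value objects:
#
#     ev = TensorEvent(
#         wall_time=1700000000.0,            # seconds since the UNIX epoch
#         step=42,                           # global training step
#         tensor_proto=tensor_pb2.TensorProto(),
#     )
#     ev.step       # -> 42
#     ev.step = 0   # raises dataclasses.FrozenInstanceError
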
class EventAccumulator:
    """An `EventAccumulator` takes an event generator, and accumulates the
    values.

    The `EventAccumulator` is intended to provide a convenient Python
    interface for loading Event data written during a TensorFlow run.
    TensorFlow writes out `Event` protobuf objects, which have a timestamp
    and step number, and often contain a `Summary`. Summaries can have
    different kinds of data stored as arbitrary tensors. The Summaries
    also have a tag, which we use to organize logically related data. The
    `EventAccumulator` supports retrieving the `Event` and `Summary` data
    by its tag.

    Calling `Tags()` gets a map from `tagType` (i.e., `tensors`) to the
    associated tags for those data types. Then, the functional endpoint
    (e.g., `Accumulator.Tensors(tag)`) allows for the retrieval of all
    data associated with that tag.

    The `Reload()` method synchronously loads all of the data written so far.

    Fields:
      most_recent_step: Step of last Event proto added. This should only
          be accessed from the thread that calls Reload. This is -1 if
          nothing has been loaded yet.
      most_recent_wall_time: Timestamp of last Event proto added. This is
          a float containing seconds from the UNIX epoch, or -1 if
          nothing has been loaded yet. This should only be accessed from
          the thread that calls Reload.
      path: A file path to a directory containing tf events files, or a single
          tf events file. The accumulator will load events from this path.
      tensors_by_tag: A dictionary mapping each tag name to a
          reservoir.Reservoir of tensor summaries. Each such reservoir will
          only use a single key, given by `_TENSOR_RESERVOIR_KEY`.

    @@Tensors
    """

    def __init__(
        self,
        path,
        size_guidance=None,
        tensor_size_guidance=None,
        purge_orphaned_data=True,
        event_file_active_filter=None,
        detect_file_replacement=None,
    ):
        """Construct the `EventAccumulator`.

        Args:
          path: A file path to a directory containing tf events files, or a single
            tf events file. The accumulator will load events from this path.
          size_guidance: Information on how much data the EventAccumulator should
            store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
            so as to avoid OOMing the client. The size_guidance should be a map
            from a `tagType` string to an integer representing the number of
            items to keep per tag for items of that `tagType`. If the size is 0,
            all events are stored.
          tensor_size_guidance: Like `size_guidance`, but allowing finer
            granularity for tensor summaries. Should be a map from the
            `plugin_name` field on the `PluginData` proto to an integer
            representing the number of items to keep per tag. Plugins for
            which there is no entry in this map will default to the value of
            `size_guidance[event_accumulator.TENSORS]`. Defaults to `{}`.
          purge_orphaned_data: Whether to discard any events that were "orphaned" by
            a TensorFlow restart.
          event_file_active_filter: Optional predicate for determining whether an
            event file latest load timestamp should be considered active. If passed,
            this will enable multifile directory loading.
          detect_file_replacement: Optional boolean; if True, event file loading
            will try to detect when a file has been replaced with a new version
            that contains additional data, by monitoring the file size.
        """
        size_guidance = dict(size_guidance or DEFAULT_SIZE_GUIDANCE)
        sizes = {}
        for key in DEFAULT_SIZE_GUIDANCE:
            if key in size_guidance:
                sizes[key] = size_guidance[key]
            else:
                sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
        self._size_guidance = size_guidance
        self._tensor_size_guidance = dict(tensor_size_guidance or {})

        self._first_event_timestamp = None

        self._graph = None
        self._graph_from_metagraph = False
        self._meta_graph = None
        self._tagged_metadata = {}
        self.summary_metadata = {}
        self.tensors_by_tag = {}
        self._tensors_by_tag_lock = threading.Lock()

        # Keep a mapping from plugin name to a dict mapping from tag to plugin data
        # content obtained from the SummaryMetadata (metadata field of Value) for
        # that plugin (This is not the entire SummaryMetadata proto - only the
        # content for that plugin). The SummaryWriter only keeps the content on the
        # first event encountered per tag, so we must store that first instance of
        # content for each tag.
        self._plugin_to_tag_to_content = collections.defaultdict(dict)
        # Locks the dict `_plugin_to_tag_to_content` as well as the
        # dicts `_plugin_to_tag_to_content[p]` for each `p`.
        self._plugin_tag_lock = threading.Lock()

        self.path = path
        self._generator = _GeneratorFromPath(
            path, event_file_active_filter, detect_file_replacement
        )
        self._generator_mutex = threading.Lock()

        self.purge_orphaned_data = purge_orphaned_data
        self._seen_session_start = False

        self.most_recent_step = -1
        self.most_recent_wall_time = -1
        self.file_version = None

        # Name of the source writer that writes the event.
        self._source_writer = None

    def Reload(self):
        """Loads all events added since the last call to `Reload`.

        If `Reload` was never called, loads all events in the file.

        Returns:
          The `EventAccumulator`.
        """
        with self._generator_mutex:
            for event in self._generator.Load():
                self._ProcessEvent(event)
        return self

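    # Usage sketch (hypothetical logdir; illustrative only, using names
    # defined in this module):
    #
    #     acc = EventAccumulator(
    #         "/tmp/logs/run1",
    #         size_guidance=STORE_EVERYTHING_SIZE_GUIDANCE,  # keep all tensors
    #     )
    #     acc.Reload()   # synchronously ingests everything written so far
    #     acc.Tags()     # {'tensors': [...], 'graph': bool, ...}
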
def PluginAssets(self, plugin_name):
|
| 207 |
+
"""Return a list of all plugin assets for the given plugin.
|
| 208 |
+
|
| 209 |
+
Args:
|
| 210 |
+
plugin_name: The string name of a plugin to retrieve assets for.
|
| 211 |
+
|
| 212 |
+
Returns:
|
| 213 |
+
A list of string plugin asset names, or empty list if none are available.
|
| 214 |
+
If the plugin was not registered, an empty list is returned.
|
| 215 |
+
"""
|
| 216 |
+
return plugin_asset_util.ListAssets(self.path, plugin_name)
|
| 217 |
+
|
| 218 |
+
def RetrievePluginAsset(self, plugin_name, asset_name):
|
| 219 |
+
"""Return the contents of a given plugin asset.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
plugin_name: The string name of a plugin.
|
| 223 |
+
asset_name: The string name of an asset.
|
| 224 |
+
|
| 225 |
+
Returns:
|
| 226 |
+
The string contents of the plugin asset.
|
| 227 |
+
|
| 228 |
+
Raises:
|
| 229 |
+
KeyError: If the asset is not available.
|
| 230 |
+
"""
|
| 231 |
+
return plugin_asset_util.RetrieveAsset(
|
| 232 |
+
self.path, plugin_name, asset_name
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
def FirstEventTimestamp(self):
|
| 236 |
+
"""Returns the timestamp in seconds of the first event.
|
| 237 |
+
|
| 238 |
+
If the first event has been loaded (either by this method or by `Reload`,
|
| 239 |
+
this returns immediately. Otherwise, it will load in the first event. Note
|
| 240 |
+
that this means that calling `Reload` will cause this to block until
|
| 241 |
+
`Reload` has finished.
|
| 242 |
+
|
| 243 |
+
Returns:
|
| 244 |
+
The timestamp in seconds of the first event that was loaded.
|
| 245 |
+
|
| 246 |
+
Raises:
|
| 247 |
+
ValueError: If no events have been loaded and there were no events found
|
| 248 |
+
on disk.
|
| 249 |
+
"""
|
| 250 |
+
if self._first_event_timestamp is not None:
|
| 251 |
+
return self._first_event_timestamp
|
| 252 |
+
with self._generator_mutex:
|
| 253 |
+
try:
|
| 254 |
+
event = next(self._generator.Load())
|
| 255 |
+
self._ProcessEvent(event)
|
| 256 |
+
return self._first_event_timestamp
|
| 257 |
+
|
| 258 |
+
except StopIteration:
|
| 259 |
+
raise ValueError("No event timestamp could be found")
|
| 260 |
+
|
| 261 |
+
def GetSourceWriter(self) -> Optional[str]:
|
| 262 |
+
"""Returns the name of the event writer."""
|
| 263 |
+
if self._source_writer is not None:
|
| 264 |
+
return self._source_writer
|
| 265 |
+
with self._generator_mutex:
|
| 266 |
+
try:
|
| 267 |
+
event = next(self._generator.Load())
|
| 268 |
+
self._ProcessEvent(event)
|
| 269 |
+
return self._source_writer
|
| 270 |
+
except StopIteration:
|
| 271 |
+
logger.info(
|
| 272 |
+
"End of file in %s, no source writer was found.", self.path
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
def PluginTagToContent(self, plugin_name):
|
| 276 |
+
"""Returns a dict mapping tags to content specific to that plugin.
|
| 277 |
+
|
| 278 |
+
Args:
|
| 279 |
+
plugin_name: The name of the plugin for which to fetch plugin-specific
|
| 280 |
+
content.
|
| 281 |
+
|
| 282 |
+
Raises:
|
| 283 |
+
KeyError: if the plugin name is not found.
|
| 284 |
+
|
| 285 |
+
Returns:
|
| 286 |
+
A dict mapping tag names to bytestrings of plugin-specific content-- by
|
| 287 |
+
convention, in the form of binary serialized protos.
|
| 288 |
+
"""
|
| 289 |
+
with self._plugin_tag_lock:
|
| 290 |
+
if plugin_name not in self._plugin_to_tag_to_content:
|
| 291 |
+
raise KeyError("Plugin %r could not be found." % plugin_name)
|
| 292 |
+
# Return a snapshot to avoid concurrent mutation and iteration issues.
|
| 293 |
+
return dict(self._plugin_to_tag_to_content[plugin_name])
|
| 294 |
+
|
| 295 |
+
def ActivePlugins(self):
|
| 296 |
+
"""Return a set of plugins with summary data.
|
| 297 |
+
|
| 298 |
+
Returns:
|
| 299 |
+
The distinct union of `plugin_data.plugin_name` fields from
|
| 300 |
+
all the `SummaryMetadata` protos stored in this accumulator.
|
| 301 |
+
"""
|
| 302 |
+
with self._plugin_tag_lock:
|
| 303 |
+
return frozenset(self._plugin_to_tag_to_content)
|
| 304 |
+
|
| 305 |
+
def SummaryMetadata(self, tag):
|
| 306 |
+
"""Given a summary tag name, return the associated metadata object.
|
| 307 |
+
|
| 308 |
+
Args:
|
| 309 |
+
tag: The name of a tag, as a string.
|
| 310 |
+
|
| 311 |
+
Raises:
|
| 312 |
+
KeyError: If the tag is not found.
|
| 313 |
+
|
| 314 |
+
Returns:
|
| 315 |
+
A `SummaryMetadata` protobuf.
|
| 316 |
+
"""
|
| 317 |
+
return self.summary_metadata[tag]
|
| 318 |
+
|
| 319 |
+
def AllSummaryMetadata(self):
|
| 320 |
+
"""Return summary metadata for all tags.
|
| 321 |
+
|
| 322 |
+
Returns:
|
| 323 |
+
A dict `d` such that `d[tag]` is a `SummaryMetadata` proto for
|
| 324 |
+
the keyed tag.
|
| 325 |
+
"""
|
| 326 |
+
return dict(self.summary_metadata)
|
| 327 |
+
|
| 328 |
+
def _ProcessEvent(self, event):
|
| 329 |
+
"""Called whenever an event is loaded."""
|
| 330 |
+
if self._first_event_timestamp is None:
|
| 331 |
+
self._first_event_timestamp = event.wall_time
|
| 332 |
+
|
| 333 |
+
if event.HasField("source_metadata"):
|
| 334 |
+
new_source_writer = event_util.GetSourceWriter(
|
| 335 |
+
event.source_metadata
|
| 336 |
+
)
|
| 337 |
+
if self._source_writer and self._source_writer != new_source_writer:
|
| 338 |
+
logger.info(
|
| 339 |
+
(
|
| 340 |
+
"Found new source writer for event.proto. "
|
| 341 |
+
"Old: {0}, New: {1}"
|
| 342 |
+
).format(self._source_writer, new_source_writer)
|
| 343 |
+
)
|
| 344 |
+
self._source_writer = new_source_writer
|
| 345 |
+
|
| 346 |
+
if event.HasField("file_version"):
|
| 347 |
+
new_file_version = event_util.ParseFileVersion(event.file_version)
|
| 348 |
+
if self.file_version and self.file_version != new_file_version:
|
| 349 |
+
## This should not happen.
|
| 350 |
+
logger.warning(
|
| 351 |
+
(
|
| 352 |
+
"Found new file_version for event.proto. This will "
|
| 353 |
+
"affect purging logic for TensorFlow restarts. "
|
| 354 |
+
"Old: {0} New: {1}"
|
| 355 |
+
).format(self.file_version, new_file_version)
|
| 356 |
+
)
|
| 357 |
+
self.file_version = new_file_version
|
| 358 |
+
|
| 359 |
+
self._MaybePurgeOrphanedData(event)
|
| 360 |
+
|
| 361 |
+
## Process the event.
|
| 362 |
+
# GraphDef and MetaGraphDef are handled in a special way:
|
| 363 |
+
# If no graph_def Event is available, but a meta_graph_def is, and it
|
| 364 |
+
# contains a graph_def, then use the meta_graph_def.graph_def as our graph.
|
| 365 |
+
# If a graph_def Event is available, always prefer it to the graph_def
|
| 366 |
+
# inside the meta_graph_def.
|
| 367 |
+
if event.HasField("graph_def"):
|
| 368 |
+
if self._graph is not None:
|
| 369 |
+
logger.warning(
|
| 370 |
+
(
|
| 371 |
+
"Found more than one graph event per run, or there was "
|
| 372 |
+
"a metagraph containing a graph_def, as well as one or "
|
| 373 |
+
"more graph events. Overwriting the graph with the "
|
| 374 |
+
"newest event."
|
| 375 |
+
)
|
| 376 |
+
)
|
| 377 |
+
self._graph = event.graph_def
|
| 378 |
+
self._graph_from_metagraph = False
|
| 379 |
+
elif event.HasField("meta_graph_def"):
|
| 380 |
+
if self._meta_graph is not None:
|
| 381 |
+
logger.warning(
|
| 382 |
+
(
|
| 383 |
+
"Found more than one metagraph event per run. "
|
| 384 |
+
"Overwriting the metagraph with the newest event."
|
| 385 |
+
)
|
| 386 |
+
)
|
| 387 |
+
self._meta_graph = event.meta_graph_def
|
| 388 |
+
if self._graph is None or self._graph_from_metagraph:
|
| 389 |
+
# We may have a graph_def in the metagraph. If so, and no
|
| 390 |
+
# graph_def is directly available, use this one instead.
|
| 391 |
+
meta_graph = meta_graph_pb2.MetaGraphDef()
|
| 392 |
+
meta_graph.ParseFromString(self._meta_graph)
|
| 393 |
+
if meta_graph.graph_def:
|
| 394 |
+
if self._graph is not None:
|
| 395 |
+
logger.warning(
|
| 396 |
+
(
|
| 397 |
+
"Found multiple metagraphs containing graph_defs,"
|
| 398 |
+
"but did not find any graph events. Overwriting the "
|
| 399 |
+
"graph with the newest metagraph version."
|
| 400 |
+
)
|
| 401 |
+
)
|
| 402 |
+
self._graph_from_metagraph = True
|
| 403 |
+
self._graph = meta_graph.graph_def.SerializeToString()
|
| 404 |
+
elif event.HasField("tagged_run_metadata"):
|
| 405 |
+
tag = event.tagged_run_metadata.tag
|
| 406 |
+
if tag in self._tagged_metadata:
|
| 407 |
+
logger.warning(
|
| 408 |
+
'Found more than one "run metadata" event with tag '
|
| 409 |
+
+ tag
|
| 410 |
+
+ ". Overwriting it with the newest event."
|
| 411 |
+
)
|
| 412 |
+
self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
|
| 413 |
+
elif event.HasField("summary"):
|
| 414 |
+
for value in event.summary.value:
|
| 415 |
+
if value.HasField("metadata"):
|
| 416 |
+
tag = value.tag
|
| 417 |
+
# We only store the first instance of the metadata. This check
|
| 418 |
+
# is important: the `FileWriter` does strip metadata from all
|
| 419 |
+
# values except the first one per each tag, but a new
|
| 420 |
+
# `FileWriter` is created every time a training job stops and
|
| 421 |
+
# restarts. Hence, we must also ignore non-initial metadata in
|
| 422 |
+
# this logic.
|
| 423 |
+
if tag not in self.summary_metadata:
|
| 424 |
+
self.summary_metadata[tag] = value.metadata
|
| 425 |
+
plugin_data = value.metadata.plugin_data
|
| 426 |
+
if plugin_data.plugin_name:
|
| 427 |
+
with self._plugin_tag_lock:
|
| 428 |
+
self._plugin_to_tag_to_content[
|
| 429 |
+
plugin_data.plugin_name
|
| 430 |
+
][tag] = plugin_data.content
|
| 431 |
+
else:
|
| 432 |
+
logger.warning(
|
| 433 |
+
(
|
| 434 |
+
"This summary with tag %r is oddly not associated with a "
|
| 435 |
+
"plugin."
|
| 436 |
+
),
|
| 437 |
+
tag,
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
if value.HasField("tensor"):
|
| 441 |
+
datum = value.tensor
|
| 442 |
+
tag = value.tag
|
| 443 |
+
if not tag:
|
| 444 |
+
# This tensor summary was created using the old method that used
|
| 445 |
+
# plugin assets. We must still continue to support it.
|
| 446 |
+
tag = value.node_name
|
| 447 |
+
self._ProcessTensor(tag, event.wall_time, event.step, datum)
|
| 448 |
+
|
| 449 |
+
def Tags(self):
|
| 450 |
+
"""Return all tags found in the value stream.
|
| 451 |
+
|
| 452 |
+
Returns:
|
| 453 |
+
A `{tagType: ['list', 'of', 'tags']}` dictionary.
|
| 454 |
+
"""
|
| 455 |
+
return {
|
| 456 |
+
TENSORS: list(self.tensors_by_tag.keys()),
|
| 457 |
+
# Use a heuristic: if the metagraph is available, but
|
| 458 |
+
# graph is not, then we assume the metagraph contains the graph.
|
| 459 |
+
GRAPH: self._graph is not None,
|
| 460 |
+
META_GRAPH: self._meta_graph is not None,
|
| 461 |
+
RUN_METADATA: list(self._tagged_metadata.keys()),
|
| 462 |
+
}
|
| 463 |
+
+    def Graph(self):
+        """Return the graph definition, if there is one.
+
+        If the graph is stored directly, return that. If no graph is stored
+        directly but a metagraph is stored containing a graph, return that.
+
+        Raises:
+          ValueError: If there is no graph for this run.
+
+        Returns:
+          The `graph_def` proto.
+        """
+        graph = graph_pb2.GraphDef()
+        if self._graph is not None:
+            graph.ParseFromString(self._graph)
+            return graph
+        raise ValueError("There is no graph in this EventAccumulator")
+
+    def SerializedGraph(self):
+        """Return the graph definition in serialized form, if there is one."""
+        return self._graph
+
+    def MetaGraph(self):
+        """Return the metagraph definition, if there is one.
+
+        Raises:
+          ValueError: If there is no metagraph for this run.
+
+        Returns:
+          The `meta_graph_def` proto.
+        """
+        if self._meta_graph is None:
+            raise ValueError("There is no metagraph in this EventAccumulator")
+        meta_graph = meta_graph_pb2.MetaGraphDef()
+        meta_graph.ParseFromString(self._meta_graph)
+        return meta_graph
+
+    def RunMetadata(self, tag):
+        """Given a tag, return the associated session.run() metadata.
+
+        Args:
+          tag: A string tag associated with the event.
+
+        Raises:
+          ValueError: If the tag is not found.
+
+        Returns:
+          The metadata in form of `RunMetadata` proto.
+        """
+        if tag not in self._tagged_metadata:
+            raise ValueError("There is no run metadata with this tag name")
+
+        run_metadata = config_pb2.RunMetadata()
+        run_metadata.ParseFromString(self._tagged_metadata[tag])
+        return run_metadata
+
+    def Tensors(self, tag):
+        """Given a summary tag, return all associated tensors.
+
+        Args:
+          tag: A string tag associated with the events.
+
+        Raises:
+          KeyError: If the tag is not found.
+
+        Returns:
+          An array of `TensorEvent`s.
+        """
+        return self.tensors_by_tag[tag].Items(_TENSOR_RESERVOIR_KEY)
+
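Together these accessors form the read path: `Graph()` and `MetaGraph()` rehydrate stored serialized protos on demand, while `Tensors()` returns whatever survived reservoir sampling for a tag. A short sketch continuing the example above (the tag name is illustrative):

    for tensor_event in acc.Tensors("loss"):  # raises KeyError for unknown tags
        # TensorEvent is a namedtuple of (wall_time, step, tensor_proto).
        print(tensor_event.step, tensor_event.wall_time)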
+    def _MaybePurgeOrphanedData(self, event):
+        """Maybe purge orphaned data due to a TensorFlow crash.
+
+        When TensorFlow crashes at step T+O and restarts at step T, any events
+        written after step T are now "orphaned" and will be at best misleading if
+        they are included in TensorBoard.
+
+        This logic attempts to determine if there is orphaned data, and purge it
+        if it is found.
+
+        Args:
+          event: The event to use as a reference, to determine if a purge is needed.
+        """
+        if not self.purge_orphaned_data:
+            return
+        ## Check if the event happened after a crash, and purge expired tags.
+        if self.file_version and self.file_version >= 2:
+            ## If the file_version is recent enough, use the SessionLog enum
+            ## to check for restarts.
+            self._CheckForRestartAndMaybePurge(event)
+        else:
+            ## If there is no file version, default to old logic of checking for
+            ## out of order steps.
+            self._CheckForOutOfOrderStepAndMaybePurge(event)
+        # After checking, update the most recent summary step and wall time.
+        if event.HasField("summary"):
+            self.most_recent_step = event.step
+            self.most_recent_wall_time = event.wall_time
+
+    def _CheckForRestartAndMaybePurge(self, event):
+        """Check and discard expired events using SessionLog.START.
+
+        The first SessionLog.START event in a run indicates the start of a
+        supervisor session. Subsequent SessionLog.START events indicate a
+        *restart*, which may need to preempt old events. This method checks
+        for a session restart event and purges all previously seen events whose
+        step is larger than or equal to this event's step.
+
+        Because of supervisor threading, it is possible that this logic will
+        cause the first few event messages to be discarded since supervisor
+        threading does not guarantee that the START message is deterministically
+        written first.
+
+        This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
+        can inadvertently discard events due to supervisor threading.
+
+        Args:
+          event: The event to use as reference. If the event is a START event, all
+            previously seen events with a greater event.step will be purged.
+        """
+        if event.session_log.status != event_pb2.SessionLog.START:
+            return
+        if not self._seen_session_start:
+            # Initial start event: does not indicate a restart.
+            self._seen_session_start = True
+            return
+        self._Purge(event, by_tags=False)
+
+    def _CheckForOutOfOrderStepAndMaybePurge(self, event):
+        """Check for out-of-order event.step and discard expired events for
+        tags.
+
+        Check if the event is out of order relative to the global most recent step.
+        If it is, purge outdated summaries for tags that the event contains.
+
+        Args:
+          event: The event to use as reference. If the event is out-of-order, all
+            events with the same tags, but with a greater event.step will be purged.
+        """
+        if event.step < self.most_recent_step and event.HasField("summary"):
+            self._Purge(event, by_tags=True)
+
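To make the restart rule concrete, here is the out-of-order check in miniature, detached from the class; the step sequence is invented and a plain list stands in for a reservoir:

    # A writer emits steps 0..5, crashes, then restarts and re-emits from step 3.
    seen = []
    most_recent_step = -1
    for step in [0, 1, 2, 3, 4, 5, 3, 4]:
        if step < most_recent_step:
            # Out-of-order step: treat it as a restart and drop events at or past it.
            seen = [s for s in seen if s < step]
        seen.append(step)
        most_recent_step = step
    print(seen)  # [0, 1, 2, 3, 4]: the orphaned 4 and 5 written before the crash are gone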
+    def _ProcessTensor(self, tag, wall_time, step, tensor):
+        tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor)
+        with self._tensors_by_tag_lock:
+            if tag not in self.tensors_by_tag:
+                reservoir_size = self._GetTensorReservoirSize(tag)
+                self.tensors_by_tag[tag] = reservoir.Reservoir(reservoir_size)
+        self.tensors_by_tag[tag].AddItem(_TENSOR_RESERVOIR_KEY, tv)
+
+    def _GetTensorReservoirSize(self, tag):
+        default = self._size_guidance[TENSORS]
+        summary_metadata = self.summary_metadata.get(tag)
+        if summary_metadata is None:
+            return default
+        return self._tensor_size_guidance.get(
+            summary_metadata.plugin_data.plugin_name, default
+        )
+
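`_GetTensorReservoirSize` is what makes downsampling tunable per plugin: the generic `TENSORS` budget applies unless the tag's owning plugin has an explicit entry in `tensor_size_guidance`. A configuration sketch continuing the earlier example (the plugin-name key and size are illustrative):

    acc = plugin_event_accumulator.EventAccumulator(
        "/tmp/logs/run1",  # placeholder logdir
        tensor_size_guidance={"scalars": 1000},  # keep up to 1000 events per scalar tag
    )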
+    def _Purge(self, event, by_tags):
+        """Purge all events that have occurred after the given event.step.
+
+        If by_tags is True, purge all events that occurred after the given
+        event.step, but only for the tags that the event has. Non-sequential
+        event.steps suggest that a TensorFlow restart occurred, and we discard
+        the out-of-order events to display a consistent view in TensorBoard.
+
+        Discarding by tags is the safer method, when we are unsure whether a restart
+        has occurred, given that threading in supervisor can cause events of
+        different tags to arrive with unsynchronized step values.
+
+        If by_tags is False, then purge all events with event.step greater than the
+        given event.step. This can be used when we are certain that a TensorFlow
+        restart has occurred and these events can be discarded.
+
+        Args:
+          event: The event to use as reference for the purge. All events with
+            the same tags, but with a greater event.step will be purged.
+          by_tags: Bool to dictate whether to discard all out-of-order events or
+            only those that are associated with the given reference event.
+        """
+        ## Keep data in reservoirs that has a step less than event.step
+        _NotExpired = lambda x: x.step < event.step
+
+        num_expired = 0
+        if by_tags:
+            for value in event.summary.value:
+                if value.tag in self.tensors_by_tag:
+                    tag_reservoir = self.tensors_by_tag[value.tag]
+                    num_expired += tag_reservoir.FilterItems(
+                        _NotExpired, _TENSOR_RESERVOIR_KEY
+                    )
+        else:
+            for tag_reservoir in self.tensors_by_tag.values():
+                num_expired += tag_reservoir.FilterItems(
+                    _NotExpired, _TENSOR_RESERVOIR_KEY
+                )
+        if num_expired > 0:
+            purge_msg = _GetPurgeMessage(
+                self.most_recent_step,
+                self.most_recent_wall_time,
+                event.step,
+                event.wall_time,
+                num_expired,
+            )
+            logger.warning(purge_msg)
+
+
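For reference, `Reservoir.FilterItems` returns how many items it dropped, which is what accumulates into `num_expired` above. A toy sketch of that contract using the same reservoir module (the key and stored values are illustrative):

    from tensorboard.backend.event_processing import reservoir

    r = reservoir.Reservoir(10)
    for step in range(5):
        r.AddItem("tensors", step)  # store plain ints in place of TensorEvents
    dropped = r.FilterItems(lambda item: item < 3, "tensors")
    print(dropped, r.Items("tensors"))  # 2 [0, 1, 2]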
+def _GetPurgeMessage(
+    most_recent_step,
+    most_recent_wall_time,
+    event_step,
+    event_wall_time,
+    num_expired,
+):
+    """Return the string message associated with TensorBoard purges."""
+    return (
+        "Detected out of order event.step likely caused by a TensorFlow "
+        "restart. Purging {} expired tensor events from Tensorboard display "
+        "between the previous step: {} (timestamp: {}) and current step: {} "
+        "(timestamp: {})."
+    ).format(
+        num_expired,
+        most_recent_step,
+        most_recent_wall_time,
+        event_step,
+        event_wall_time,
+    )
+
+
+def _GeneratorFromPath(
+    path, event_file_active_filter=None, detect_file_replacement=None
+):
+    """Create an event generator for file or directory at given path string."""
+    if not path:
+        raise ValueError("path must be a valid string")
+    if io_wrapper.IsSummaryEventsFile(path):
+        return event_file_loader.EventFileLoader(path, detect_file_replacement)
+    elif event_file_active_filter:
+        loader_factory = (
+            lambda path: event_file_loader.TimestampedEventFileLoader(
+                path, detect_file_replacement
+            )
+        )
+        return directory_loader.DirectoryLoader(
+            path,
+            loader_factory,
+            path_filter=io_wrapper.IsSummaryEventsFile,
+            active_filter=event_file_active_filter,
+        )
+    else:
+        loader_factory = lambda path: event_file_loader.EventFileLoader(
+            path, detect_file_replacement
+        )
+        return directory_watcher.DirectoryWatcher(
+            path,
+            loader_factory,
+            io_wrapper.IsSummaryEventsFile,
+        )
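The dispatch above reduces to three cases: a path that is itself a tfevents file gets a single `EventFileLoader`; a directory gets a `DirectoryWatcher`; and supplying an active-file filter upgrades the directory case to a `DirectoryLoader` that can skip inactive files. A sketch of the plain-directory case (internal API, placeholder path):

    gen = _GeneratorFromPath("/tmp/logs/run1")  # no filter, so a DirectoryWatcher
    for event in gen.Load():  # yields the events currently on disk, oldest first
        print(event.step, event.wall_time)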
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/json_util.py
ADDED
@@ -0,0 +1,72 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""A module providing a function for serializing JSON values with Infinity.
+
+Python provides no way to override how json.dumps serializes
+Infinity/-Infinity/NaN; if allow_nan is true, it encodes them as
+Infinity/-Infinity/NaN, in violation of the JSON spec and in violation
+of what JSON.parse accepts. If it's false, it throws a ValueError.
+Neither subclassing JSONEncoder nor passing a function in the |default|
+keyword argument overrides this.
+"""
+
+
+import collections
+import math
+
+
+_INFINITY = float("inf")
+_NEGATIVE_INFINITY = float("-inf")
+
+
+def Cleanse(obj, encoding="utf-8"):
+    """Makes Python object appropriate for JSON serialization.
+
+    - Replaces instances of Infinity/-Infinity/NaN with strings.
+    - Turns byte strings into unicode strings.
+    - Turns sets into sorted lists.
+    - Turns tuples into lists.
+
+    Args:
+      obj: Python data structure.
+      encoding: Charset used to decode byte strings.
+
+    Returns:
+      Unicode JSON data structure.
+    """
+    if isinstance(obj, int):
+        return obj
+    elif isinstance(obj, float):
+        if obj == _INFINITY:
+            return "Infinity"
+        elif obj == _NEGATIVE_INFINITY:
+            return "-Infinity"
+        elif math.isnan(obj):
+            return "NaN"
+        else:
+            return obj
+    elif isinstance(obj, bytes):
+        return obj.decode(encoding)
+    elif isinstance(obj, (list, tuple)):
+        return [Cleanse(i, encoding) for i in obj]
+    elif isinstance(obj, set):
+        return [Cleanse(i, encoding) for i in sorted(obj)]
+    elif isinstance(obj, dict):
+        return collections.OrderedDict(
+            (Cleanse(k, encoding), Cleanse(v, encoding)) for k, v in obj.items()
+        )
+    else:
+        return obj
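Because `Cleanse` returns plain JSON-safe structures, it composes directly with the standard encoder; no custom `JSONEncoder` is needed. A small usage sketch:

    import json

    # Non-finite floats become strings, bytes decode to str, sets become sorted lists.
    payload = {"loss": float("nan"), "ids": {3, 1, 2}, "name": b"run1"}
    print(json.dumps(Cleanse(payload)))
    # {"loss": "NaN", "ids": [1, 2, 3], "name": "run1"}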
infer_4_33_0/lib/python3.10/site-packages/tensorboard/backend/path_prefix.py
ADDED
@@ -0,0 +1,68 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Internal path prefix support for TensorBoard.
+
+Using a path prefix of `/foo/bar` enables TensorBoard to serve from
+`http://localhost:6006/foo/bar/` rather than `http://localhost:6006/`.
+See the `--path_prefix` flag docs for more details.
+"""
+
+
+from tensorboard import errors
+
+
+class PathPrefixMiddleware:
+    """WSGI middleware for path prefixes.
+
+    All requests to this middleware must begin with the specified path
+    prefix (otherwise, a 404 will be returned immediately). Requests
+    will be forwarded to the underlying application with the path prefix
+    stripped and appended to `SCRIPT_NAME` (see the WSGI spec, PEP 3333,
+    for details).
+    """
+
+    def __init__(self, application, path_prefix):
+        """Initializes this middleware.
+
+        Args:
+          application: The WSGI application to wrap (see PEP 3333).
+          path_prefix: A string path prefix to be stripped from incoming
+            requests. If empty, this middleware is a no-op. If non-empty,
+            the path prefix must start with a slash and not end with one
+            (e.g., "/tensorboard").
+        """
+        if path_prefix.endswith("/"):
+            raise ValueError(
+                "Path prefix must not end with slash: %r" % path_prefix
+            )
+        if path_prefix and not path_prefix.startswith("/"):
+            raise ValueError(
+                "Non-empty path prefix must start with slash: %r" % path_prefix
+            )
+        self._application = application
+        self._path_prefix = path_prefix
+        self._strict_prefix = self._path_prefix + "/"
+
+    def __call__(self, environ, start_response):
+        path = environ.get("PATH_INFO", "")
+        if path != self._path_prefix and not path.startswith(
+            self._strict_prefix
+        ):
+            raise errors.NotFoundError()
+        environ["PATH_INFO"] = path[len(self._path_prefix) :]
+        environ["SCRIPT_NAME"] = (
+            environ.get("SCRIPT_NAME", "") + self._path_prefix
+        )
+        return self._application(environ, start_response)
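As a plain PEP 3333 wrapper, the middleware drops into any WSGI stack. A minimal sketch with a stand-in inner app:

    def hello_app(environ, start_response):
        # Stand-in WSGI app that echoes the rewritten routing variables.
        start_response("200 OK", [("Content-Type", "text/plain")])
        body = "SCRIPT_NAME=%(SCRIPT_NAME)s PATH_INFO=%(PATH_INFO)s" % environ
        return [body.encode("utf-8")]

    app = PathPrefixMiddleware(hello_app, "/tensorboard")
    # A request for /tensorboard/data reaches hello_app with
    # SCRIPT_NAME="/tensorboard" and PATH_INFO="/data".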
infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__init__.py
ADDED
File without changes
infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (182 Bytes)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/allocation_description_pb2.cpython-310.pyc
ADDED
Binary file (1.56 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/api_def_pb2.cpython-310.pyc
ADDED
Binary file (2.77 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/attr_value_pb2.cpython-310.pyc
ADDED
Binary file (2.99 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cluster_pb2.cpython-310.pyc
ADDED
Binary file (1.73 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/config_pb2.cpython-310.pyc
ADDED
Binary file (11.3 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/coordination_config_pb2.cpython-310.pyc
ADDED
Binary file (1.89 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cost_graph_pb2.cpython-310.pyc
ADDED
Binary file (3.18 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/cpp_shape_inference_pb2.cpython-310.pyc
ADDED
Binary file (2.69 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/debug_pb2.cpython-310.pyc
ADDED
Binary file (2.12 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/event_pb2.cpython-310.pyc
ADDED
Binary file (4.67 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/full_type_pb2.cpython-310.pyc
ADDED
Binary file (3 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/function_pb2.cpython-310.pyc
ADDED
Binary file (4.28 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_debug_info_pb2.cpython-310.pyc
ADDED
Binary file (3.13 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/graph_pb2.cpython-310.pyc
ADDED
Binary file (2.08 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/histogram_pb2.cpython-310.pyc
ADDED
Binary file (1.45 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/meta_graph_pb2.cpython-310.pyc
ADDED
Binary file (7.47 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/node_def_pb2.cpython-310.pyc
ADDED
Binary file (2.27 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/op_def_pb2.cpython-310.pyc
ADDED
Binary file (3.28 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/resource_handle_pb2.cpython-310.pyc
ADDED
Binary file (2.09 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rewriter_config_pb2.cpython-310.pyc
ADDED
Binary file (5.6 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/rpc_options_pb2.cpython-310.pyc
ADDED
Binary file (1.37 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saved_object_graph_pb2.cpython-310.pyc
ADDED
Binary file (6.79 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/saver_pb2.cpython-310.pyc
ADDED
Binary file (1.66 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/step_stats_pb2.cpython-310.pyc
ADDED
Binary file (4.16 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/struct_pb2.cpython-310.pyc
ADDED
Binary file (4.96 kB)

infer_4_33_0/lib/python3.10/site-packages/tensorboard/compat/proto/__pycache__/summary_pb2.cpython-310.pyc
ADDED
Binary file (3.66 kB)