I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions


@@ -0,0 +1,257 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Images plugin."""
import imghdr
import urllib.parse
from werkzeug import wrappers
from tensorboard import errors
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.image import metadata
_IMGHDR_TO_MIMETYPE = {
"bmp": "image/bmp",
"gif": "image/gif",
"jpeg": "image/jpeg",
"png": "image/png",
"svg": "image/svg+xml",
}
_DEFAULT_IMAGE_MIMETYPE = "application/octet-stream"
_DEFAULT_DOWNSAMPLING = 10 # images per time series
# Extend imghdr.tests to include svg.
def detect_svg(data, f):
del f # Unused.
    # Assume XML documents attached to an image tag are SVG.
if data.startswith(b"<?xml ") or data.startswith(b"<svg "):
return "svg"
imghdr.tests.append(detect_svg)
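# Illustrative check of the extended detection (not part of the plugin):
# after registration, `imghdr.what(None, b'<svg width="10">...')` returns
# "svg", because `detect_svg` matches the `b"<svg "` prefix.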
class ImagesPlugin(base_plugin.TBPlugin):
"""Images Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ImagesPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._downsample_to = (context.sampling_hints or {}).get(
self.plugin_name, _DEFAULT_DOWNSAMPLING
)
self._data_provider = context.data_provider
self._version_checker = plugin_util._MetadataVersionChecker(
data_kind="image",
latest_known_version=0,
)
def get_plugin_apps(self):
return {
"/images": self._serve_image_metadata,
"/individualImage": self._serve_individual_image,
"/tags": self._serve_tags,
}
def is_active(self):
return False # `list_plugins` as called by TB core suffices
def frontend_metadata(self):
return base_plugin.FrontendMetadata(element_name="tf-image-dashboard")
def _index_impl(self, ctx, experiment):
mapping = self._data_provider.list_blob_sequences(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
)
result = {run: {} for run in mapping}
for run, tag_to_content in mapping.items():
for tag, metadatum in tag_to_content.items():
md = metadata.parse_plugin_metadata(metadatum.plugin_content)
if not self._version_checker.ok(md.version, run, tag):
continue
description = plugin_util.markdown_to_safe_html(
metadatum.description
)
result[run][tag] = {
"displayName": metadatum.display_name,
"description": description,
"samples": metadatum.max_length - 2, # width, height
}
return result
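    # Shape of the value returned by `_index_impl`, for illustration only:
    #   {"run_name": {"tag_name": {"displayName": ..., "description": ...,
    #                              "samples": 2}}}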
@wrappers.Request.application
def _serve_image_metadata(self, request):
"""Given a tag and list of runs, serve a list of metadata for images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
tag = request.args.get("tag")
run = request.args.get("run")
sample = int(request.args.get("sample", 0))
try:
response = self._image_response_for_run(
ctx, experiment, run, tag, sample
)
except KeyError:
return http_util.Respond(
request, "Invalid run or tag", "text/plain", code=400
)
return http_util.Respond(request, response, "application/json")
def _image_response_for_run(self, ctx, experiment, run, tag, sample):
"""Builds a JSON-serializable object with information about images.
        Args:
            ctx: A request context, as produced by `plugin_util.context`.
            experiment: The experiment ID, as a string.
            run: The name of the run.
tag: The name of the tag the images all belong to.
sample: The zero-indexed sample of the image for which to retrieve
information. For instance, setting `sample` to `2` will fetch
information about only the third image of each batch. Steps with
fewer than three images will be omitted from the results.
Returns:
A list of dictionaries containing the wall time, step, and URL
for each image.
Raises:
KeyError, NotFoundError: If no image data exists for the given
parameters.
"""
all_images = self._data_provider.read_blob_sequences(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
downsample=self._downsample_to,
run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
)
images = all_images.get(run, {}).get(tag, None)
if images is None:
raise errors.NotFoundError(
"No image data for run=%r, tag=%r" % (run, tag)
)
return [
{
"wall_time": datum.wall_time,
"step": datum.step,
"query": self._data_provider_query(datum.values[sample + 2]),
}
for datum in images
if len(datum.values) - 2 > sample
]
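    # Each element of the list returned above resembles, illustratively:
    #   {"wall_time": 1635600000.0, "step": 7, "query": "blob_key=..."}
    # where "query" is the query string to pass to /individualImage.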
def _filter_by_sample(self, tensor_events, sample):
return [
tensor_event
for tensor_event in tensor_events
if (
len(tensor_event.tensor_proto.string_val) - 2 # width, height
> sample
)
]
def _query_for_individual_image(self, run, tag, sample, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image_metadata. Note that the URL is
*not* guaranteed to always return the same image, since images may be
unloaded from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
sample: The relevant sample index, zero-indexed. See documentation
on `_image_response_for_run` for more details.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled image
in the given run with the given tag.
"""
query_string = urllib.parse.urlencode(
{
"run": run,
"tag": tag,
"sample": sample,
"index": index,
}
)
return query_string
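    # Illustrative example: run="train", tag="images", sample=0, index=2
    # produces "run=train&tag=images&sample=0&index=2".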
def _data_provider_query(self, blob_reference):
return urllib.parse.urlencode({"blob_key": blob_reference.blob_key})
def _get_generic_data_individual_image(self, ctx, blob_key):
"""Returns the actual image bytes for a given image.
Args:
blob_key: As returned by a previous `read_blob_sequences` call.
Returns:
A bytestring of the raw image bytes.
"""
return self._data_provider.read_blob(ctx, blob_key=blob_key)
@wrappers.Request.application
def _serve_individual_image(self, request):
"""Serves an individual image."""
try:
ctx = plugin_util.context(request.environ)
blob_key = request.args["blob_key"]
data = self._get_generic_data_individual_image(ctx, blob_key)
except (KeyError, IndexError):
return http_util.Respond(
request,
"Invalid run, tag, index, or sample",
"text/plain",
code=400,
)
image_type = imghdr.what(None, data)
content_type = _IMGHDR_TO_MIMETYPE.get(
image_type, _DEFAULT_IMAGE_MIMETYPE
)
return http_util.Respond(request, data, content_type)
@wrappers.Request.application
def _serve_tags(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
index = self._index_impl(ctx, experiment)
return http_util.Respond(request, index, "application/json")


@@ -0,0 +1,66 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Internal information about the images plugin."""
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.image import plugin_data_pb2
PLUGIN_NAME = "images"
# The most recent value for the `version` field of the `ImagePluginData`
# proto.
PROTO_VERSION = 0
def create_summary_metadata(
display_name, description, *, converted_to_tensor=None
):
"""Create a `summary_pb2.SummaryMetadata` proto for image plugin data.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object.
"""
content = plugin_data_pb2.ImagePluginData(
version=PROTO_VERSION,
converted_to_tensor=converted_to_tensor,
)
metadata = summary_pb2.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString()
),
)
return metadata
def parse_plugin_metadata(content):
"""Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the image plugin.
Returns:
An `ImagePluginData` protobuf object.
"""
if not isinstance(content, bytes):
raise TypeError("Content type must be bytes")
result = plugin_data_pb2.ImagePluginData.FromString(content)
if result.version == 0:
return result
# No other versions known at this time, so no migrations to do.
return result
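# Round-trip sketch (illustrative, using only the names defined above):
#   md = create_summary_metadata("name", "description")
#   parsed = parse_plugin_metadata(md.plugin_data.content)
#   assert parsed.version == PROTO_VERSION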


@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorboard/plugins/image/plugin_data.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+tensorboard/plugins/image/plugin_data.proto\x12\x0btensorboard\"?\n\x0fImagePluginData\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x1b\n\x13\x63onverted_to_tensor\x18\x02 \x01(\x08\x62\x06proto3')
_IMAGEPLUGINDATA = DESCRIPTOR.message_types_by_name['ImagePluginData']
ImagePluginData = _reflection.GeneratedProtocolMessageType('ImagePluginData', (_message.Message,), {
'DESCRIPTOR' : _IMAGEPLUGINDATA,
'__module__' : 'tensorboard.plugins.image.plugin_data_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.ImagePluginData)
})
_sym_db.RegisterMessage(ImagePluginData)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_IMAGEPLUGINDATA._serialized_start=60
_IMAGEPLUGINDATA._serialized_end=123
# @@protoc_insertion_point(module_scope)


@@ -0,0 +1,161 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image summaries and TensorFlow operations to create them.
An image summary stores the width, height, and PNG-encoded data for zero
or more images in a rank-1 string array: `[w, h, png0, png1, ...]`.
NOTE: This module is in beta, and its API is subject to change, but the
data that it stores to disk will be supported forever.
"""
import numpy as np
from tensorboard.plugins.image import metadata
from tensorboard.plugins.image import summary_v2
from tensorboard.util import encoder
# Export V2 versions.
image = summary_v2.image
def op(
name,
images,
max_outputs=3,
display_name=None,
description=None,
collections=None,
):
"""Create a legacy image summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
where `k` is the number of images, `h` and `w` are the height and
width of the images, and `c` is the number of channels, which
should be 1, 3, or 4. Any of the dimensions may be statically
unknown (i.e., `None`).
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many images will be emitted at each step. When more than
`max_outputs` many images are provided, the first `max_outputs` many
images will be used and the rest silently discarded.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
        `[tf.GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description
)
with tf.name_scope(name), tf.control_dependencies(
[
tf.assert_rank(images, 4),
tf.assert_type(images, tf.uint8),
tf.assert_non_negative(max_outputs),
]
):
limited_images = images[:max_outputs]
encoded_images = tf.map_fn(
tf.image.encode_png,
limited_images,
dtype=tf.string,
name="encode_each_image",
)
image_shape = tf.shape(input=images)
dimensions = tf.stack(
[
tf.as_string(image_shape[2], name="width"),
tf.as_string(image_shape[1], name="height"),
],
name="dimensions",
)
tensor = tf.concat([dimensions, encoded_images], axis=0)
return tf.summary.tensor_summary(
name="image_summary",
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata,
)
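# Usage sketch for `op` (illustrative; `tf` is `tensorflow.compat.v1`):
#   images = tf.placeholder(tf.uint8, shape=[None, 64, 64, 3])
#   summary_op = op("training_images", images, max_outputs=2)
# The resulting op can be fetched in a session and written with a
# `tf.summary.FileWriter`.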
def pb(name, images, max_outputs=3, display_name=None, description=None):
"""Create a legacy image summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
images: An `np.array` representing pixel data with shape
        `[k, h, w, c]`, where `k` is the number of images, `h` and `w` are
        the height and width of the images, and `c` is the number of
        channels, which should be 1, 3, or 4.
max_outputs: Optional `int`. At most this many images will be
emitted. If more than this many images are provided, the first
`max_outputs` many images will be used and the rest silently
discarded.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
images = np.array(images).astype(np.uint8)
if images.ndim != 4:
raise ValueError("Shape %r must have rank 4" % (images.shape,))
limited_images = images[:max_outputs]
encoded_images = [encoder.encode_png(image) for image in limited_images]
(width, height) = (images.shape[2], images.shape[1])
content = [str(width), str(height)] + encoded_images
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description
)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString()
)
summary = tf.Summary()
summary.value.add(
tag="%s/image_summary" % name,
metadata=tf_summary_metadata,
tensor=tensor,
)
return summary
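# Usage sketch for `pb` (illustrative): build a summary protobuf directly
# from a NumPy batch, without running a TensorFlow graph:
#   batch = np.zeros([2, 8, 8, 3], dtype=np.uint8)
#   summary_proto = pb("noise", batch, max_outputs=2)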


@@ -0,0 +1,131 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image summaries and TensorFlow operations to create them, V2 versions.
An image summary stores the width, height, and PNG-encoded data for zero
or more images in a rank-1 string array: `[w, h, png0, png1, ...]`.
"""
from tensorboard.compat import tf2 as tf
from tensorboard.plugins.image import metadata
from tensorboard.util import lazy_tensor_creator
def image(name, data, step=None, max_outputs=3, description=None):
"""Write an image summary.
See also `tf.summary.scalar`, `tf.summary.SummaryWriter`.
Writes a collection of images to the current default summary writer. Data
appears in TensorBoard's 'Images' dashboard. Like `tf.summary.scalar` points,
each collection of images is associated with a `step` and a `name`. All the
image collections with the same `name` constitute a time series of image
collections.
This example writes 2 random grayscale images:
```python
w = tf.summary.create_file_writer('test/logs')
with w.as_default():
image1 = tf.random.uniform(shape=[8, 8, 1])
image2 = tf.random.uniform(shape=[8, 8, 1])
tf.summary.image("grayscale_noise", [image1, image2], step=0)
```
To avoid clipping, data should be converted to one of the following:
- floating point values in the range [0,1], or
- uint8 values in the range [0,255]
```python
# Convert the original dtype=int32 `Tensor` into `dtype=float64`.
rgb_image_float = tf.constant([
[[1000, 0, 0], [0, 500, 1000]],
]) / 1000
tf.summary.image("picture", [rgb_image_float], step=0)
# Convert original dtype=uint8 `Tensor` into proper range.
rgb_image_uint8 = tf.constant([
[[1, 1, 0], [0, 0, 1]],
], dtype=tf.uint8) * 255
tf.summary.image("picture", [rgb_image_uint8], step=1)
```
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
where `k` is the number of images, `h` and `w` are the height and
width of the images, and `c` is the number of channels, which
should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
Any of the dimensions may be statically unknown (i.e., `None`).
Floating point data will be clipped to the range [0,1]. Other data types
will be clipped into an allowed range for safe casting to uint8, using
`tf.image.convert_image_dtype`.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many images will be emitted at each step. When more than
`max_outputs` many images are provided, the first `max_outputs` many
images will be used and the rest silently discarded.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description
)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, "summary_scope", None)
or tf.summary.summary_scope
)
with summary_scope(
name, "image_summary", values=[data, max_outputs, step]
) as (tag, _):
# Defer image encoding preprocessing by passing it as a callable to write(),
# wrapped in a LazyTensorCreator for backwards compatibility, so that we
# only do this work when summaries are actually written.
@lazy_tensor_creator.LazyTensorCreator
def lazy_tensor():
tf.debugging.assert_rank(data, 4)
tf.debugging.assert_non_negative(max_outputs)
images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
limited_images = images[:max_outputs]
            encoded_images = tf.map_fn(
                tf.image.encode_png,
                limited_images,
                dtype=tf.string,
                name="encode_each_image",
            )
image_shape = tf.shape(input=images)
dimensions = tf.stack(
[
tf.as_string(image_shape[2], name="width"),
tf.as_string(image_shape[1], name="height"),
],
name="dimensions",
)
return tf.concat([dimensions, encoded_images], axis=0)
        # To ensure that the image encoding logic is executed only when
        # summaries are actually written, we pass a callable as the
        # `tensor` argument.
return tf.summary.write(
tag=tag, tensor=lazy_tensor, step=step, metadata=summary_metadata
)
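# Writer-flow sketch (illustrative): with a default writer active,
#   writer = tf.summary.create_file_writer("logs")
#   with writer.as_default():
#       image("batch", data, step=0)
# the PNG encoding inside `lazy_tensor` runs only if the event is
# actually recorded.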