I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions


@@ -0,0 +1,365 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A data provider that talks to a gRPC server."""
import collections
import contextlib
import grpc
from tensorboard import errors
from tensorboard.data import provider
from tensorboard.data.proto import data_provider_pb2
from tensorboard.data.proto import data_provider_pb2_grpc
from tensorboard.util import tensor_util
from tensorboard.util import timing
def make_stub(channel):
"""Wraps a gRPC channel with a service stub."""
return data_provider_pb2_grpc.TensorBoardDataProviderStub(channel)
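# A minimal wiring sketch (hedged: the address and the use of an insecure
# channel are illustrative only):
#
#     channel = grpc.insecure_channel("localhost:6806")
#     data_provider = GrpcDataProvider("localhost:6806", make_stub(channel))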
class GrpcDataProvider(provider.DataProvider):
"""Data provider that talks over gRPC."""
def __init__(self, addr, stub):
"""Initializes a GrpcDataProvider.
Args:
addr: String address of the remote peer. Used cosmetically for
data location.
stub: `data_provider_pb2_grpc.TensorBoardDataProviderStub`
value. See `make_stub` to construct one from a channel.
"""
self._addr = addr
self._stub = stub
def __str__(self):
return "GrpcDataProvider(addr=%r)" % self._addr
def experiment_metadata(self, ctx, *, experiment_id):
req = data_provider_pb2.GetExperimentRequest()
req.experiment_id = experiment_id
with _translate_grpc_error():
res = self._stub.GetExperiment(req)
res = provider.ExperimentMetadata(
data_location=res.data_location,
experiment_name=res.name,
experiment_description=res.description,
creation_time=_timestamp_proto_to_float(res.creation_time),
)
return res
def list_plugins(self, ctx, *, experiment_id):
req = data_provider_pb2.ListPluginsRequest()
req.experiment_id = experiment_id
with _translate_grpc_error():
res = self._stub.ListPlugins(req)
return [p.name for p in res.plugins]
def list_runs(self, ctx, *, experiment_id):
req = data_provider_pb2.ListRunsRequest()
req.experiment_id = experiment_id
with _translate_grpc_error():
res = self._stub.ListRuns(req)
return [
provider.Run(
run_id=run.name,
run_name=run.name,
start_time=run.start_time,
)
for run in res.runs
]
@timing.log_latency
def list_scalars(
self, ctx, *, experiment_id, plugin_name, run_tag_filter=None
):
with timing.log_latency("build request"):
req = data_provider_pb2.ListScalarsRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
with timing.log_latency("_stub.ListScalars"):
with _translate_grpc_error():
res = self._stub.ListScalars(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
time_series = tag_entry.metadata
tags[tag_entry.tag_name] = provider.ScalarTimeSeries(
max_step=time_series.max_step,
max_wall_time=time_series.max_wall_time,
plugin_content=time_series.summary_metadata.plugin_data.content,
description=time_series.summary_metadata.summary_description,
display_name=time_series.summary_metadata.display_name,
)
return result
@timing.log_latency
def read_scalars(
self,
ctx,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None,
):
with timing.log_latency("build request"):
req = data_provider_pb2.ReadScalarsRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
req.downsample.num_points = downsample
with timing.log_latency("_stub.ReadScalars"):
with _translate_grpc_error():
res = self._stub.ReadScalars(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
series = []
tags[tag_entry.tag_name] = series
d = tag_entry.data
for step, wt, value in zip(d.step, d.wall_time, d.value):
point = provider.ScalarDatum(
step=step,
wall_time=wt,
value=value,
)
series.append(point)
return result
@timing.log_latency
def read_last_scalars(
self,
ctx,
*,
experiment_id,
plugin_name,
run_tag_filter=None,
):
with timing.log_latency("build request"):
req = data_provider_pb2.ReadScalarsRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
# `ReadScalars` always includes the most recent datum, therefore
# downsampling to one means fetching the latest value.
req.downsample.num_points = 1
with timing.log_latency("_stub.ReadScalars"):
with _translate_grpc_error():
res = self._stub.ReadScalars(req)
with timing.log_latency("build result"):
result = collections.defaultdict(dict)
for run_entry in res.runs:
run_name = run_entry.run_name
for tag_entry in run_entry.tags:
d = tag_entry.data
# There should be no more than one datum in
# `tag_entry.data` since downsample was set to 1.
for step, wt, value in zip(d.step, d.wall_time, d.value):
result[run_name][tag_entry.tag_name] = (
provider.ScalarDatum(
step=step,
wall_time=wt,
value=value,
)
)
return result
@timing.log_latency
def list_tensors(
self, ctx, *, experiment_id, plugin_name, run_tag_filter=None
):
with timing.log_latency("build request"):
req = data_provider_pb2.ListTensorsRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
with timing.log_latency("_stub.ListTensors"):
with _translate_grpc_error():
res = self._stub.ListTensors(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
time_series = tag_entry.metadata
tags[tag_entry.tag_name] = provider.TensorTimeSeries(
max_step=time_series.max_step,
max_wall_time=time_series.max_wall_time,
plugin_content=time_series.summary_metadata.plugin_data.content,
description=time_series.summary_metadata.summary_description,
display_name=time_series.summary_metadata.display_name,
)
return result
@timing.log_latency
def read_tensors(
self,
ctx,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None,
):
with timing.log_latency("build request"):
req = data_provider_pb2.ReadTensorsRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
req.downsample.num_points = downsample
with timing.log_latency("_stub.ReadTensors"):
with _translate_grpc_error():
res = self._stub.ReadTensors(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
series = []
tags[tag_entry.tag_name] = series
d = tag_entry.data
for step, wt, value in zip(d.step, d.wall_time, d.value):
point = provider.TensorDatum(
step=step,
wall_time=wt,
numpy=tensor_util.make_ndarray(value),
)
series.append(point)
return result
@timing.log_latency
def list_blob_sequences(
self, ctx, *, experiment_id, plugin_name, run_tag_filter=None
):
with timing.log_latency("build request"):
req = data_provider_pb2.ListBlobSequencesRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
with timing.log_latency("_stub.ListBlobSequences"):
with _translate_grpc_error():
res = self._stub.ListBlobSequences(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
time_series = tag_entry.metadata
tags[tag_entry.tag_name] = provider.BlobSequenceTimeSeries(
max_step=time_series.max_step,
max_wall_time=time_series.max_wall_time,
max_length=time_series.max_length,
plugin_content=time_series.summary_metadata.plugin_data.content,
description=time_series.summary_metadata.summary_description,
display_name=time_series.summary_metadata.display_name,
)
return result
@timing.log_latency
def read_blob_sequences(
self,
ctx,
*,
experiment_id,
plugin_name,
downsample=None,
run_tag_filter=None,
):
with timing.log_latency("build request"):
req = data_provider_pb2.ReadBlobSequencesRequest()
req.experiment_id = experiment_id
req.plugin_filter.plugin_name = plugin_name
_populate_rtf(run_tag_filter, req.run_tag_filter)
req.downsample.num_points = downsample
with timing.log_latency("_stub.ReadBlobSequences"):
with _translate_grpc_error():
res = self._stub.ReadBlobSequences(req)
with timing.log_latency("build result"):
result = {}
for run_entry in res.runs:
tags = {}
result[run_entry.run_name] = tags
for tag_entry in run_entry.tags:
series = []
tags[tag_entry.tag_name] = series
d = tag_entry.data
for step, wt, blob_sequence in zip(
d.step, d.wall_time, d.values
):
values = []
for ref in blob_sequence.blob_refs:
values.append(
provider.BlobReference(
blob_key=ref.blob_key, url=ref.url or None
)
)
point = provider.BlobSequenceDatum(
step=step, wall_time=wt, values=tuple(values)
)
series.append(point)
return result
@timing.log_latency
def read_blob(self, ctx, *, blob_key):
with timing.log_latency("build request"):
req = data_provider_pb2.ReadBlobRequest()
req.blob_key = blob_key
with timing.log_latency("list(_stub.ReadBlob)"):
with _translate_grpc_error():
responses = list(self._stub.ReadBlob(req))
with timing.log_latency("build result"):
return b"".join(res.data for res in responses)
@contextlib.contextmanager
def _translate_grpc_error():
try:
yield
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.INVALID_ARGUMENT:
raise errors.InvalidArgumentError(e.details())
if e.code() == grpc.StatusCode.NOT_FOUND:
raise errors.NotFoundError(e.details())
if e.code() == grpc.StatusCode.PERMISSION_DENIED:
raise errors.PermissionDeniedError(e.details())
raise
def _populate_rtf(run_tag_filter, rtf_proto):
"""Copies `run_tag_filter` into `rtf_proto`."""
if run_tag_filter is None:
return
if run_tag_filter.runs is not None:
rtf_proto.runs.names[:] = sorted(run_tag_filter.runs)
if run_tag_filter.tags is not None:
rtf_proto.tags.names[:] = sorted(run_tag_filter.tags)
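# For example (a sketch; `RunTagFilter` comes from `tensorboard.data.provider`):
#
#     rtf = provider.RunTagFilter(runs=["train", "eval"], tags=["loss"])
#     req = data_provider_pb2.ListScalarsRequest()
#     _populate_rtf(rtf, req.run_tag_filter)
#     # req.run_tag_filter.runs.names == ["eval", "train"]  (sorted)
#     # req.run_tag_filter.tags.names == ["loss"]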
def _timestamp_proto_to_float(ts):
"""Converts `timestamp_pb2.Timestamp` to float seconds since epoch."""
return ts.ToNanoseconds() / 1e9
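# Usage sketch, given a `GrpcDataProvider` wired up as under `make_stub`
# above (hedged: the experiment ID and plugin name are illustrative, and
# `None` stands in for a real request context; `downsample` must be an
# int here, since it is written straight into the request proto):
#
#     scalars = data_provider.read_scalars(
#         None,
#         experiment_id="123",
#         plugin_name="scalars",
#         downsample=1000,
#     )
#     # => {run_name: {tag_name: [ScalarDatum(step, wall_time, value), ...]}}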


@@ -0,0 +1,46 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstraction for data ingestion logic."""
import abc
class DataIngester(metaclass=abc.ABCMeta):
"""Link between a data source and a data provider.
A data ingester starts a reload operation in the background and
provides a data provider as a view.
"""
@property
@abc.abstractmethod
def data_provider(self):
"""Returns a `DataProvider`.
It may be an error to dereference this before `start` is called.
"""
pass
@abc.abstractmethod
def start(self):
"""Starts ingesting data.
This may start a background thread or process, and will return
once communication with that task is established. It does not
block while data is loaded or reloaded.
Must only be called once.
"""
pass
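# A minimal concrete sketch (hedged: any existing `DataProvider`
# implementation could be substituted for `some_data_provider`):
#
#     class PreloadedDataIngester(DataIngester):
#         def __init__(self, data_provider):
#             self._data_provider = data_provider
#
#         @property
#         def data_provider(self):
#             return self._data_provider
#
#         def start(self):
#             pass  # Nothing to reload; data is already in memory.
#
#     ingester = PreloadedDataIngester(some_data_provider)
#     ingester.start()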

File diff suppressed because one or more lines are too long


@@ -0,0 +1,374 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from tensorboard.data.proto import data_provider_pb2 as tensorboard_dot_data_dot_proto_dot_data__provider__pb2
class TensorBoardDataProviderStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetExperiment = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/GetExperiment',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentResponse.FromString,
)
self.ListPlugins = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ListPlugins',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsResponse.FromString,
)
self.ListRuns = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ListRuns',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsResponse.FromString,
)
self.ListScalars = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ListScalars',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsResponse.FromString,
)
self.ReadScalars = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ReadScalars',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsResponse.FromString,
)
self.ListTensors = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ListTensors',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsResponse.FromString,
)
self.ReadTensors = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ReadTensors',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsResponse.FromString,
)
self.ListBlobSequences = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ListBlobSequences',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesResponse.FromString,
)
self.ReadBlobSequences = channel.unary_unary(
'/tensorboard.data.TensorBoardDataProvider/ReadBlobSequences',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesResponse.FromString,
)
self.ReadBlob = channel.unary_stream(
'/tensorboard.data.TensorBoardDataProvider/ReadBlob',
request_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobRequest.SerializeToString,
response_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobResponse.FromString,
)
class TensorBoardDataProviderServicer(object):
"""Missing associated documentation comment in .proto file."""
def GetExperiment(self, request, context):
"""Get metadata about an experiment.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListPlugins(self, request, context):
"""List plugins that have data for an experiment.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListRuns(self, request, context):
"""List runs within an experiment.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListScalars(self, request, context):
"""List metadata about scalar time series.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadScalars(self, request, context):
"""Read data from scalar time series.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTensors(self, request, context):
"""List metadata about tensor time series.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadTensors(self, request, context):
"""Read data from tensor time series.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListBlobSequences(self, request, context):
"""List metadata about blob sequence time series.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadBlobSequences(self, request, context):
"""Read blob references from blob sequence time series. See `ReadBlob` to read
the actual blob data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadBlob(self, request, context):
"""Read data for a specific blob.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TensorBoardDataProviderServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetExperiment': grpc.unary_unary_rpc_method_handler(
servicer.GetExperiment,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentResponse.SerializeToString,
),
'ListPlugins': grpc.unary_unary_rpc_method_handler(
servicer.ListPlugins,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsResponse.SerializeToString,
),
'ListRuns': grpc.unary_unary_rpc_method_handler(
servicer.ListRuns,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsResponse.SerializeToString,
),
'ListScalars': grpc.unary_unary_rpc_method_handler(
servicer.ListScalars,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsResponse.SerializeToString,
),
'ReadScalars': grpc.unary_unary_rpc_method_handler(
servicer.ReadScalars,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsResponse.SerializeToString,
),
'ListTensors': grpc.unary_unary_rpc_method_handler(
servicer.ListTensors,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsResponse.SerializeToString,
),
'ReadTensors': grpc.unary_unary_rpc_method_handler(
servicer.ReadTensors,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsResponse.SerializeToString,
),
'ListBlobSequences': grpc.unary_unary_rpc_method_handler(
servicer.ListBlobSequences,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesResponse.SerializeToString,
),
'ReadBlobSequences': grpc.unary_unary_rpc_method_handler(
servicer.ReadBlobSequences,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesResponse.SerializeToString,
),
'ReadBlob': grpc.unary_stream_rpc_method_handler(
servicer.ReadBlob,
request_deserializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobRequest.FromString,
response_serializer=tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorboard.data.TensorBoardDataProvider', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class TensorBoardDataProvider(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetExperiment(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/GetExperiment',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.GetExperimentResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListPlugins(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ListPlugins',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListPluginsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListRuns(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ListRuns',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListRunsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListScalars(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ListScalars',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListScalarsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ReadScalars(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ReadScalars',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadScalarsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListTensors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ListTensors',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListTensorsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ReadTensors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ReadTensors',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadTensorsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListBlobSequences(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ListBlobSequences',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ListBlobSequencesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ReadBlobSequences(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tensorboard.data.TensorBoardDataProvider/ReadBlobSequences',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobSequencesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ReadBlob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/tensorboard.data.TensorBoardDataProvider/ReadBlob',
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobRequest.SerializeToString,
tensorboard_dot_data_dot_proto_dot_data__provider__pb2.ReadBlobResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
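# A usage sketch for the experimental convenience API above (hedged: the
# target address and experiment ID are illustrative, and `insecure=True`
# requires leaving `channel_credentials` unset):
#
#     _pb2 = tensorboard_dot_data_dot_proto_dot_data__provider__pb2
#     req = _pb2.GetExperimentRequest(experiment_id="123")
#     res = TensorBoardDataProvider.GetExperiment(
#         req, "localhost:6806", insecure=True, timeout=5
#     )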

File diff suppressed because it is too large


@@ -0,0 +1,302 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data ingestion logic backed by a gRPC server."""
import errno
import logging
import os
import subprocess
import tempfile
import time
import grpc
import pkg_resources
from tensorboard.data import grpc_provider
from tensorboard.data import ingester
from tensorboard.data.proto import data_provider_pb2
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# If this environment variable is non-empty, it will be used as the path to the
# data server binary rather than using a bundled version.
_ENV_DATA_SERVER_BINARY = "TENSORBOARD_DATA_SERVER_BINARY"
class ExistingServerDataIngester(ingester.DataIngester):
"""Connect to an already running gRPC server."""
def __init__(self, address, *, channel_creds_type):
"""Initializes an ingester with the given configuration.
Args:
address: String, as passed to `--grpc_data_provider`.
channel_creds_type: `grpc_util.ChannelCredsType`, as passed to
`--grpc_creds_type`.
"""
stub = _make_stub(address, channel_creds_type)
self._data_provider = grpc_provider.GrpcDataProvider(address, stub)
@property
def data_provider(self):
return self._data_provider
def start(self):
pass
class SubprocessServerDataIngester(ingester.DataIngester):
"""Start a new data server as a subprocess."""
def __init__(
self,
server_binary,
logdir,
*,
reload_interval,
channel_creds_type,
samples_per_plugin=None,
extra_flags=None,
):
"""Initializes an ingester with the given configuration.
Args:
server_binary: `ServerBinary` to launch.
logdir: String, as passed to `--logdir`.
reload_interval: Number, as passed to `--reload_interval`.
channel_creds_type: `grpc_util.ChannelCredsType`, as passed to
`--grpc_creds_type`.
samples_per_plugin: Dict[String, Int], as parsed from
`--samples_per_plugin`.
extra_flags: List of extra string flags to be passed to the
data server without further interpretation.
"""
self._server_binary = server_binary
self._data_provider = None
self._logdir = logdir
self._reload_interval = reload_interval
self._channel_creds_type = channel_creds_type
self._samples_per_plugin = samples_per_plugin or {}
self._extra_flags = list(extra_flags or [])
@property
def data_provider(self):
if self._data_provider is None:
raise RuntimeError("Must call `start` first")
return self._data_provider
def start(self):
if self._data_provider:
return
tmpdir = tempfile.TemporaryDirectory(prefix="tensorboard_data_server_")
port_file_path = os.path.join(tmpdir.name, "port")
error_file_path = os.path.join(tmpdir.name, "startup_error")
if self._reload_interval <= 0:
reload = "once"
else:
reload = str(int(self._reload_interval))
sample_hint_pairs = [
"%s=%s" % (k, "all" if v == 0 else v)
for k, v in self._samples_per_plugin.items()
]
samples_per_plugin = ",".join(sample_hint_pairs)
args = [
self._server_binary.path,
"--logdir=%s" % os.path.expanduser(self._logdir),
"--reload=%s" % reload,
"--samples-per-plugin=%s" % samples_per_plugin,
"--port=0",
"--port-file=%s" % (port_file_path,),
"--die-after-stdin",
]
if self._server_binary.at_least_version("0.5.0a0"):
args.append("--error-file=%s" % (error_file_path,))
if logger.isEnabledFor(logging.INFO):
args.append("--verbose")
if logger.isEnabledFor(logging.DEBUG):
args.append("--verbose") # Repeat arg to increase verbosity.
args.extend(self._extra_flags)
logger.info("Spawning data server: %r", args)
popen = subprocess.Popen(args, stdin=subprocess.PIPE)
# Stash stdin to avoid calling its destructor: on Windows, this
# is a `subprocess.Handle` that closes itself in `__del__`,
# which would cause the data server to shut down. (This is not
# documented; you have to read CPython source to figure it out.)
# We want that to happen at end of process, but not before.
self._stdin_handle = popen.stdin # stash to avoid stdin being closed
port = None
# The server only needs about 10 microseconds to spawn on my machine,
# but give a few orders of magnitude of padding, and then poll.
time.sleep(0.01)
for i in range(20):
if popen.poll() is not None:
msg = (_maybe_read_file(error_file_path) or "").strip()
if not msg:
msg = (
"exited with %d; check stderr for details"
% popen.poll()
)
raise DataServerStartupError(msg)
logger.info("Polling for data server port (attempt %d)", i)
port_file_contents = _maybe_read_file(port_file_path)
logger.info("Port file contents: %r", port_file_contents)
if (port_file_contents or "").endswith("\n"):
port = int(port_file_contents)
break
# Else, not done writing yet.
time.sleep(0.5)
if port is None:
raise DataServerStartupError(
"Timed out while waiting for data server to start. "
"It may still be running as pid %d." % popen.pid
)
addr = "localhost:%d" % port
stub = _make_stub(addr, self._channel_creds_type)
logger.info(
"Opened channel to data server at pid %d via %s",
popen.pid,
addr,
)
req = data_provider_pb2.GetExperimentRequest()
try:
stub.GetExperiment(req, timeout=5) # should be near-instant
except grpc.RpcError as e:
msg = "Failed to communicate with data server at %s: %s" % (addr, e)
logger.warning("%s", msg)
raise DataServerStartupError(msg) from e
logger.info("Got valid response from data server")
self._data_provider = grpc_provider.GrpcDataProvider(addr, stub)
def _maybe_read_file(path):
"""Read a file, or return `None` on ENOENT specifically."""
try:
with open(path) as infile:
return infile.read()
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
def _make_stub(addr, channel_creds_type):
(creds, options) = channel_creds_type.channel_config()
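# gRPC caps inbound messages at 4 MiB by default; raise the limit so that
# large tensor and blob-sequence responses fit in a single message.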
options.append(("grpc.max_receive_message_length", 1024 * 1024 * 256))
channel = grpc.secure_channel(addr, creds, options=options)
return grpc_provider.make_stub(channel)
class NoDataServerError(RuntimeError):
pass
class DataServerStartupError(RuntimeError):
pass
class ServerBinary:
"""Information about a data server binary."""
def __init__(self, path, version):
"""Initializes a `ServerBinary`.
Args:
path: String path to executable on disk.
version: PEP 440-compliant version string, or `None` if
unknown or not applicable. Binaries at unknown versions are
assumed to be bleeding-edge: if you bring your own binary,
it's on you to make sure that it's up to date.
"""
self._path = path
self._version = (
pkg_resources.parse_version(version)
if version is not None
else version
)
@property
def path(self):
return self._path
def at_least_version(self, required_version):
"""Test whether the binary's version is at least the given one.
Useful for gating features that are available in the latest data
server builds from head, but not yet released to PyPI. For
example, if v0.4.0 is the latest published version, you can
check `at_least_version("0.5.0a0")` to include both prereleases
at head and the eventual final release of v0.5.0.
If this binary's version was set to `None` at construction time,
this method always returns `True`.
Args:
required_version: PEP 440-compliant version string.
Returns:
Boolean.
"""
if self._version is None:
return True
return self._version >= pkg_resources.parse_version(required_version)
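# For example (a sketch; the path is illustrative):
#
#     sb = ServerBinary("/tmp/tensorboard_data_server", "0.5.0a1")
#     sb.at_least_version("0.5.0a0")  # True
#     sb.at_least_version("0.5.0")  # False: prereleases sort before finals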
def get_server_binary():
"""Get `ServerBinary` info or raise `NoDataServerError`."""
env_result = os.environ.get(_ENV_DATA_SERVER_BINARY)
if env_result:
logging.info("Server binary (from env): %s", env_result)
if not os.path.isfile(env_result):
raise NoDataServerError(
"Found environment variable %s=%s, but no such file exists."
% (_ENV_DATA_SERVER_BINARY, env_result)
)
return ServerBinary(env_result, version=None)
bundle_result = os.path.join(os.path.dirname(__file__), "server", "server")
if os.path.exists(bundle_result):
logging.info("Server binary (from bundle): %s", bundle_result)
return ServerBinary(bundle_result, version=None)
try:
import tensorboard_data_server
except ImportError:
pass
else:
pkg_result = tensorboard_data_server.server_binary()
version = tensorboard_data_server.__version__
logger.info(
"Server binary (from Python package v%s): %s", version, pkg_result
)
if pkg_result is None:
raise NoDataServerError(
"TensorBoard data server not supported on this platform."
)
return ServerBinary(pkg_result, version)
raise NoDataServerError(
"TensorBoard data server not found. This mode is experimental. "
"If building from source, pass --define=link_data_server=true."
)