Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TensorBoard 2.12.0 #6192

Merged
merged 7 commits into from
Feb 9, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 1 addition & 14 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ env:
BUILDTOOLS_VERSION: '3.0.0'
BUILDIFIER_SHA256SUM: 'e92a6793c7134c5431c58fbc34700664f101e5c9b1c1fcd93b97978e8b7f88db'
BUILDOZER_SHA256SUM: '3d58a0b6972e4535718cdd6c12778170ea7382de7c75bc3728f5719437ffb84d'
TENSORFLOW_VERSION: 'tf-nightly'
TENSORFLOW_VERSION: 'tf-nightly==2.12.0.dev20230201'

jobs:
build:
Expand Down Expand Up @@ -115,19 +115,6 @@ jobs:
bazel test //tensorboard/summary/writer:event_file_writer_s3_test &&
bazel test //tensorboard/compat/tensorflow_stub:gfile_fsspec_test &&
bazel test //tensorboard/summary/writer:event_file_writer_fsspec_test
- name: 'Bazel: build Pip package (with TensorFlow only)'
if: matrix.tf_version_id == 'tf'
run: |
./tensorboard/tools/update_version.sh
rm -rf /tmp/tb_nightly_pip_package && mkdir /tmp/tb_nightly_pip_package
bazel run //tensorboard/pip_package:build_pip_package -- /tmp/tb_nightly_pip_package
- name: 'Upload Pip package as an artifact (with TensorFlow only)'
# Prevent uploads when running on forks or non-master branch.
if: matrix.tf_version_id == 'tf' && github.repository == 'tensorflow/tensorboard' && github.ref == 'refs/heads/master'
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1
with:
name: tb-nightly
path: /tmp/tb_nightly_pip_package/*

build-data-server-pip:
runs-on: ${{ matrix.platform }}
Expand Down
26 changes: 26 additions & 0 deletions RELEASE.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,29 @@
# Release 2.12.0

The 2.12 minor series tracks TensorFlow 2.12.

## Features

- Time Series dashboard visualization improvements: (#6137)
- Allows selection of a step or range of steps on a scalar chart, and shows a table with data from those steps under it.
- Enables linking the selected steps across all charts in the Time Series dashboard.
- Time Series dashboard now sorts runs in the tooltip by pixel distance (matching the Scalars dashboard) (#6116).
- Fast data loading mode (`--load_fast`, aka “RustBoard”) improvements:
- Supports more ways to authenticate to GCS, including GKE service accounts, via `gcp_auth` (#5939, thanks @Corwinpro).
- Now available on `manylinux2014` platforms (#6101, thanks @adamjstewart).

## Bug Fixes

- Fixes long-standing breakage in the standalone version of the Projector visualization (#6069).
- Fixes broken help dialog button in projector plugin (#6024, thanks @mromanelli9).
- Fixes a bug in which a deadlock could cause the event writer to hang (#6168, thanks @crassirostris).


## Breaking Changes

- Drops support for Python 3.7 and marks 3.11 as supported (#6144).
- Drops support for protobuf < 3.19.6 and adds support for 4.x (#6147).

# Release 2.11.0

The 2.11 minor series tracks TensorFlow 2.11.
Expand Down
48 changes: 32 additions & 16 deletions tensorboard/compat/proto/config.proto
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,9 @@ option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";

message GPUOptions {
// Fraction of the available GPU memory to allocate for each process.
// Fraction of the total GPU memory to allocate for each process.
// 1 means to allocate all of the GPU memory, 0.5 means the process
// allocates up to ~50% of the available GPU memory.
// allocates up to ~50% of the total GPU memory.
//
// GPU memory is pre-allocated unless the allow_growth option is enabled.
//
Expand Down Expand Up @@ -239,6 +239,19 @@ message GPUOptions {
// hopes that another thread will free up memory in the meantime. Setting
// this to true disables the sleep; instead we'll OOM immediately.
bool disallow_retry_on_allocation_failure = 12;

// Memory limit for "GPU host allocator", aka pinned memory allocator. This
// can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
float gpu_host_mem_limit_in_mb = 13;

// If true, then the host allocator allocates its max memory all upfront and
// never grows. This can be useful for latency-sensitive systems, because
// growing the GPU host memory pool can be expensive.
//
// You probably only want to use this in combination with
// gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
// quite high.
bool gpu_host_mem_disallow_growth = 14;
}

// Everything inside experimental is subject to change and is not subject
Expand Down Expand Up @@ -582,7 +595,8 @@ message ConfigProto {
// If set, this can be used by the runtime and the Ops for debugging,
// monitoring, etc.
//
// NOTE: This is currently used and propagated only by the direct session.
// NOTE: This is currently used and propagated only by the direct session
// and EagerContext.
SessionMetadata session_metadata = 11;

// If true, the session may treat the graph as being static for optimization
Expand Down Expand Up @@ -615,18 +629,9 @@ message ConfigProto {
MLIR_BRIDGE_ROLLOUT_ENABLED = 1;
// Disabling the MLIR bridge disables it for all graphs in this session.
MLIR_BRIDGE_ROLLOUT_DISABLED = 2;
// Enable the MLIR bridge on a per graph basis based on an analysis of
// the features used in the graph. If the features used by the graph are
// supported by the MLIR bridge, the MLIR bridge will be used to run the
// graph.
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3;
// Enable the MLIR bridge in a fallback mode on a per graph basis based
// on an analysis of the features used in the graph.
// Running the MLIR bridge in the fallback mode means that it is
// executed and it commits all the changes to the TF graph in case
// of success. And it does not in case of failures and let the old bridge
// to process the TF graph.
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4;
reserved 3, 4;
reserved "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED",
"MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED";
}
// Whether to enable the MLIR-based TF->XLA bridge.
MlirBridgeRollout mlir_bridge_rollout = 17;
Expand Down Expand Up @@ -675,7 +680,18 @@ message ConfigProto {
// Distributed coordination service configurations.
CoordinationServiceConfig coordination_config = 23;

// Next: 24
// If true, the session will treat the graph as being non-static for
// optimization purposes.
//
// If this option is set to true when a session is created, the full
// GraphDef will be retained to enable calls to Session::Extend().
// Calling Extend() without setting this flag will result in errors.
//
// This option is meant to replace `optimize_for_static_graph` and it
// aims to negate its value.
bool disable_optimize_for_static_graph = 24;

// Next: 25
}

Experimental experimental = 16;
Expand Down
11 changes: 11 additions & 0 deletions tensorboard/compat/proto/full_type.proto
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,17 @@ enum FullTypeId {
// TFT_ENCODING[TFT_INT32, TFT_STRING] is an integer encoded as string.
TFT_ENCODED = 1004;

// The type of "shape tensors" where the runtime value is the shape of
// some tensor(s), i.e. the output of tf.shape.
// Shape tensors have special, host-only placement, in contrast to
// TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
// with no special placement.
//
// Examples:
// TFT_SHAPE_TENSOR[TFT_INT32] is the most common
// TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
TFT_SHAPE_TENSOR = 1005;

// Type attributes. These always appear in the parametrization of a type,
// never alone. For example, there is no such thing as a "bool" TensorFlow
// object (for now).
Expand Down
3 changes: 2 additions & 1 deletion tensorboard/compat/proto/proto_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,9 @@
("tensorflow/core/framework/", "tensorboard/compat/proto/"),
("tensorflow/core/protobuf/", "tensorboard/compat/proto/"),
("tensorflow/core/profiler/", "tensorboard/compat/proto/"),
("tensorflow/python/framework/", "tensorboard/compat/proto/"),
("tensorflow/core/util/", "tensorboard/compat/proto/"),
("tensorflow/python/framework/", "tensorboard/compat/proto/"),
("tensorflow/tsl/protobuf/", "tensorboard/compat/proto/"),
('package: "tensorflow.tfprof"', 'package: "tensorboard"'),
('package: "tensorflow"', 'package: "tensorboard"'),
('type_name: ".tensorflow.tfprof', 'type_name: ".tensorboard'),
Expand Down
4 changes: 4 additions & 0 deletions tensorboard/compat/proto/tensor.proto
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,10 @@ message TensorProto {

// DT_UINT64
repeated uint64 uint64_val = 17 [packed = true];

// DT_FLOAT8_*, use variable-sized set of bytes
// (i.e. the equivalent of repeated uint8, if such a thing existed).
bytes float8_val = 18;
}

// Protocol buffer representing the serialization format of DT_VARIANT tensors.
Expand Down
3 changes: 2 additions & 1 deletion tensorboard/compat/proto/tfprof_log.proto
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
syntax = "proto3";

package tensorboard;
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/profiler/tfprof_log_go_proto";

import "tensorboard/compat/proto/attr_value.proto";
import "tensorboard/compat/proto/step_stats.proto";

option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/profiler/protos_all_go_proto";

// It specifies the Python callstack that creates an op.
message CodeDef {
repeated Trace traces = 1;
Expand Down
7 changes: 6 additions & 1 deletion tensorboard/compat/proto/types.proto
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ enum DataType {
DT_QINT8 = 11; // Quantized int8
DT_QUINT8 = 12; // Quantized uint8
DT_QINT32 = 13; // Quantized int32
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits.
DT_QINT16 = 15; // Quantized int16
DT_QUINT16 = 16; // Quantized uint16
DT_UINT16 = 17;
Expand All @@ -40,6 +40,9 @@ enum DataType {
DT_VARIANT = 21; // Arbitrary C++ data types
DT_UINT32 = 22;
DT_UINT64 = 23;
DT_FLOAT8_E5M2 = 24; // 5 exponent bits, 2 mantissa bits.
DT_FLOAT8_E4M3FN = 25; // 4 exponent bits, 3 mantissa bits, finite-only, with
// 2 NaNs (0bS1111111).

// Do not use! These are only for parameters. Every enum above
// should have a corresponding value below (verified by types_test).
Expand All @@ -66,6 +69,8 @@ enum DataType {
DT_VARIANT_REF = 121;
DT_UINT32_REF = 122;
DT_UINT64_REF = 123;
DT_FLOAT8_E5M2_REF = 124;
DT_FLOAT8_E4M3FN_REF = 125;
}
// DISABLED.ThenChange(
// https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h,
Expand Down
Binary file modified tensorboard/data/server/descriptor.bin
Binary file not shown.
14 changes: 13 additions & 1 deletion tensorboard/data/server/tensorboard.pb.rs

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion tensorboard/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@

"""Contains the version string."""

VERSION = "2.12.0a0"
VERSION = "2.12.0"