diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..4d5bc75f750
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,12 @@
+*.java text eol=lf
+*.kt text eol=lf
+*.cc text eol=lf
+*.h text eol=lf
+*.pom text eol=lf
+
+*.md text eol=lf
+*.sh text eol=lf
+
+*.pbtxt text eol=lf
+
+*.pb binary
\ No newline at end of file
diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml
index 9f23757e83d..fe15687edbf 100644
--- a/tensorflow-core/tensorflow-core-api/pom.xml
+++ b/tensorflow-core/tensorflow-core-api/pom.xml
@@ -482,6 +482,40 @@
+      <plugin>
+        <groupId>com.diffplug.spotless</groupId>
+        <artifactId>spotless-maven-plugin</artifactId>
+        <version>${spotless.version}</version>
+        <configuration>
+          <java>
+            <includes>
+              <include>src/gen/**/*.java</include>
+              <include>src/main/**/*.java</include>
+              <include>src/test/**/*.java</include>
+            </includes>
+            <excludes>
+              <exclude>src/main/**/c_api/**/*.java</exclude>
+              <exclude>src/gen/**/c_api/**/*.java</exclude>
+            </excludes>
+          </java>
+        </configuration>
+        <executions>
+          <execution>
+            <id>format-generated</id>
+            <phase>process-classes</phase>
+            <goals>
+              <goal>apply</goal>
+            </goals>
+            <configuration>
+              <java>
+                <includes>
+                  <include>src/gen/**/*.java</include>
+                </includes>
+              </java>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java
index 74f7efb4623..ee8ab5f53e3 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java
@@ -308,10 +308,12 @@
 /**
  * An API for building operations as {@link Op Op}s
- * <p>
- * Any operation wrapper found in the classpath properly annotated as an{@link org.tensorflow.op.annotation.Operator @Operator} is exposed
- * by this API or one of its subgroup.
+ *
+ * <p>Any operation wrapper found in the classpath properly annotated as an{@link
+ * org.tensorflow.op.annotation.Operator @Operator} is exposed by this API or one of its subgroup.
+ *
- * <p>
- * Example usage:
+ * <p>Example usage:
+ *
  * <pre>{@code
  * try (Graph g = new Graph()) {
  *   Ops tf = Ops.create(g);
@@ -326,7 +328,7 @@
  *   Operand nine = tf.math.add(four, tf.constant(5));
  *   // Multi-result operations however offer methods to
  *   // select a particular result for use.
- *   Operand result = 
+ *   Operand result =
  *       tf.math.add(tf.unique(s, a).y(), b);
  *   // Optional attributes
  *   tf.linalg.matMul(a, b, MatMul.transposeA(true));
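The reformatted Javadoc example above is unchanged in behavior. As a self-contained, eager-mode sketch of the same API (standard tensorflow-core-api types; the wrapper class name here is hypothetical):

```java
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class EagerOpsExample {
  public static void main(String[] args) {
    // Ops.create() with no argument runs eagerly: no Graph or Session required.
    Ops tf = Ops.create();

    // Single-result operation: the returned Add op is itself an Operand.
    Operand<TInt32> four = tf.math.add(tf.constant(1), tf.constant(3));

    // In eager mode the value is available immediately.
    System.out.println(four.asTensor().getInt()); // 4
  }
}
```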
@@ -365,20 +367,20 @@ public final class Ops {
 
   public final SparseOps sparse;
 
-  public final TpuOps tpu;
-
   public final BitwiseOps bitwise;
 
+  public final TpuOps tpu;
+
   public final MathOps math;
 
   public final AudioOps audio;
 
   public final SignalOps signal;
 
-  public final QuantizationOps quantization;
-
   public final TrainOps train;
 
+  public final QuantizationOps quantization;
+
   private final Scope scope;
 
   private Ops(Scope scope) {
@@ -396,20 +398,20 @@ private Ops(Scope scope) {
     random = new RandomOps(this);
     strings = new StringsOps(this);
     sparse = new SparseOps(this);
-    tpu = new TpuOps(this);
     bitwise = new BitwiseOps(this);
+    tpu = new TpuOps(this);
     math = new MathOps(this);
     audio = new AudioOps(this);
     signal = new SignalOps(this);
-    quantization = new QuantizationOps(this);
     train = new TrainOps(this);
+    quantization = new QuantizationOps(this);
   }
 
   /**
-   * Raise a exception to abort the process when called.
-   *  If exit_without_error is true, the process will exit normally,
-   *  otherwise it will exit with a SIGABORT signal.
-   *  <p>Returns nothing but an exception.
+   * Raise a exception to abort the process when called. If exit_without_error is true, the process
+   * will exit normally, otherwise it will exit with a SIGABORT signal.
+   *
+   * <p>
Returns nothing but an exception. * * @param options carries optional attribute values * @return a new instance of Abort @@ -419,15 +421,13 @@ public Abort abort(Abort.Options... options) { } /** - * Computes the "logical and" of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the "logical and" of elements across dimensions of a tensor. Reduces {@code + * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank + * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the + * reduced dimensions are retained with length 1. * * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @return a new instance of All */ @@ -436,15 +436,13 @@ public All all(Operand input, Operand axis, All.Option } /** - * Computes the "logical or" of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the "logical or" of elements across dimensions of a tensor. Reduces {@code + * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank + * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the + * reduced dimensions are retained with length 1. * * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @return a new instance of Any */ @@ -527,7 +525,7 @@ public Constant array(float... data) { * * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * sequences of bytes from the last array dimension. * @return the {@code String} constant */ public Constant array(Charset charset, String... data) { @@ -535,24 +533,23 @@ public Constant array(Charset charset, String... data) { } /** - * Asserts that the given condition is true. - * If {@code condition} evaluates to false, print the list of tensors in {@code data}. - * {@code summarize} determines how many entries of the tensors to print. + * Asserts that the given condition is true. If {@code condition} evaluates to false, print the + * list of tensors in {@code data}. {@code summarize} determines how many entries of the tensors + * to print. * * @param condition The condition to evaluate. * @param data The tensors to print out when condition is false. * @param options carries optional attribute values * @return a new instance of AssertThat */ - public AssertThat assertThat(Operand condition, Iterable> data, - AssertThat.Options... 
options) { + public AssertThat assertThat( + Operand condition, Iterable> data, AssertThat.Options... options) { return AssertThat.create(scope, condition, data, options); } /** - * Update 'ref' by assigning 'value' to it. - * This operation outputs "ref" after the assignment is done. - * This makes it easier to chain operations that need to use the reset value. + * Update 'ref' by assigning 'value' to it. This operation outputs "ref" after the + * assignment is done. This makes it easier to chain operations that need to use the reset value. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. May be uninitialized. @@ -561,15 +558,14 @@ public AssertThat assertThat(Operand condition, Iterable> data * @param data type for {@code Assign} output and operands * @return a new instance of Assign */ - public Assign assign(Operand ref, Operand value, - Assign.Options... options) { + public Assign assign( + Operand ref, Operand value, Assign.Options... options) { return Assign.create(scope, ref, value, options); } /** - * Update 'ref' by adding 'value' to it. - * This operation outputs "ref" after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * Update 'ref' by adding 'value' to it. This operation outputs "ref" after the update + * is done. This makes it easier to chain operations that need to use the reset value. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -578,29 +574,27 @@ public Assign assign(Operand ref, Operand value, * @param data type for {@code AssignAdd} output and operands * @return a new instance of AssignAdd */ - public AssignAdd assignAdd(Operand ref, Operand value, - AssignAdd.Options... options) { + public AssignAdd assignAdd( + Operand ref, Operand value, AssignAdd.Options... options) { return AssignAdd.create(scope, ref, value, options); } /** - * Adds a value to the current value of a variable. - * Any ReadVariableOp with a control dependency on this op is guaranteed to - * see the incremented value or a subsequent newer one. + * Adds a value to the current value of a variable. Any ReadVariableOp with a control dependency + * on this op is guaranteed to see the incremented value or a subsequent newer one. * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignAddVariableOp */ - public AssignAddVariableOp assignAddVariableOp(Operand resource, - Operand value) { + public AssignAddVariableOp assignAddVariableOp( + Operand resource, Operand value) { return AssignAddVariableOp.create(scope, resource, value); } /** - * Update 'ref' by subtracting 'value' from it. - * This operation outputs "ref" after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * Update 'ref' by subtracting 'value' from it. This operation outputs "ref" after the + * update is done. This makes it easier to chain operations that need to use the reset value. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -609,48 +603,45 @@ public AssignAddVariableOp assignAddVariableOp(Operand resource * @param data type for {@code AssignSub} output and operands * @return a new instance of AssignSub */ - public AssignSub assignSub(Operand ref, Operand value, - AssignSub.Options... 
options) { + public AssignSub assignSub( + Operand ref, Operand value, AssignSub.Options... options) { return AssignSub.create(scope, ref, value, options); } /** - * Subtracts a value from the current value of a variable. - * Any ReadVariableOp with a control dependency on this op is guaranteed to - * see the decremented value or a subsequent newer one. + * Subtracts a value from the current value of a variable. Any ReadVariableOp with a control + * dependency on this op is guaranteed to see the decremented value or a subsequent newer one. * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignSubVariableOp */ - public AssignSubVariableOp assignSubVariableOp(Operand resource, - Operand value) { + public AssignSubVariableOp assignSubVariableOp( + Operand resource, Operand value) { return AssignSubVariableOp.create(scope, resource, value); } /** - * Assigns a new value to a variable. - * Any ReadVariableOp with a control dependency on this op is guaranteed to return - * this value or a subsequent newer value of the variable. + * Assigns a new value to a variable. Any ReadVariableOp with a control dependency on this op is + * guaranteed to return this value or a subsequent newer value of the variable. * * @param resource handle to the resource in which to store the variable. * @param value the value to set the new tensor to use. * @return a new instance of AssignVariableOp */ - public AssignVariableOp assignVariableOp(Operand resource, - Operand value) { + public AssignVariableOp assignVariableOp( + Operand resource, Operand value) { return AssignVariableOp.create(scope, resource, value); } /** - * Defines a barrier that persists across different graph executions. - * A barrier represents a key-value map, where each key is a string, and - * each value is a tuple of tensors. - *

At runtime, the barrier contains 'complete' and 'incomplete' - * elements. A complete element has defined tensors for all components of - * its value tuple, and may be accessed using BarrierTakeMany. An - * incomplete element has some undefined components in its value tuple, - * and may be updated using BarrierInsertMany. + * Defines a barrier that persists across different graph executions. A barrier represents a + * key-value map, where each key is a string, and each value is a tuple of tensors. + * + *

At runtime, the barrier contains 'complete' and 'incomplete' elements. A complete element + * has defined tensors for all components of its value tuple, and may be accessed using + * BarrierTakeMany. An incomplete element has some undefined components in its value tuple, and + * may be updated using BarrierInsertMany. * * @param componentTypes The type of each component in a value. * @param options carries optional attribute values @@ -661,13 +652,12 @@ public Barrier barrier(List> componentTypes, Barrier.Opti } /** - * Closes the given barrier. - * This operation signals that no more new elements will be inserted in the - * given barrier. Subsequent InsertMany that try to introduce a new key will fail. - * Subsequent InsertMany operations that just add missing components to already - * existing elements will continue to succeed. Subsequent TakeMany operations will - * continue to succeed if sufficient completed elements remain in the barrier. - * Subsequent TakeMany operations that would block will fail immediately. + * Closes the given barrier. This operation signals that no more new elements will be inserted in + * the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent + * InsertMany operations that just add missing components to already existing elements will + * continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient + * completed elements remain in the barrier. Subsequent TakeMany operations that would block will + * fail immediately. * * @param handle The handle to a barrier. * @param options carries optional attribute values @@ -688,21 +678,23 @@ public BarrierIncompleteSize barrierIncompleteSize(Operand handle) { } /** - * For each key, assigns the respective value to the specified component. - * If a key is not found in the barrier, this operation will create a new - * incomplete element. If a key is found in the barrier, and the element - * already has a value at component_index, this operation will fail with - * INVALID_ARGUMENT, and leave the barrier in an undefined state. + * For each key, assigns the respective value to the specified component. If a key is not found in + * the barrier, this operation will create a new incomplete element. If a key is found in the + * barrier, and the element already has a value at component_index, this operation will fail with + * INVALID_ARGUMENT, and leave the barrier in an undefined state. * * @param handle The handle to a barrier. * @param keys A one-dimensional tensor of keys, with length n. - * @param values An any-dimensional tensor of values, which are associated with the - * respective keys. The 0th dimension must have length n. + * @param values An any-dimensional tensor of values, which are associated with the respective + * keys. The 0th dimension must have length n. * @param componentIndex The component of the barrier elements that is being assigned. * @return a new instance of BarrierInsertMany */ - public BarrierInsertMany barrierInsertMany(Operand handle, Operand keys, - Operand values, Long componentIndex) { + public BarrierInsertMany barrierInsertMany( + Operand handle, + Operand keys, + Operand values, + Long componentIndex) { return BarrierInsertMany.create(scope, handle, keys, values, componentIndex); } @@ -717,59 +709,58 @@ public BarrierReadySize barrierReadySize(Operand handle) { } /** - * Takes the given number of completed elements from a barrier. 
- * This operation concatenates completed-element component tensors along - * the 0th dimension to make a single component tensor. - *

Elements come out of the barrier when they are complete, and in the order - * in which they were placed into the barrier. The indices output provides - * information about the batch in which each element was originally inserted - * into the barrier. + * Takes the given number of completed elements from a barrier. This operation concatenates + * completed-element component tensors along the 0th dimension to make a single component tensor. + * + *

Elements come out of the barrier when they are complete, and in the order in which they were + * placed into the barrier. The indices output provides information about the batch in which each + * element was originally inserted into the barrier. * * @param handle The handle to a barrier. - * @param numElements A single-element tensor containing the number of elements to - * take. + * @param numElements A single-element tensor containing the number of elements to take. * @param componentTypes The type of each component in a value. * @param options carries optional attribute values * @return a new instance of BarrierTakeMany */ - public BarrierTakeMany barrierTakeMany(Operand handle, Operand numElements, - List> componentTypes, BarrierTakeMany.Options... options) { + public BarrierTakeMany barrierTakeMany( + Operand handle, + Operand numElements, + List> componentTypes, + BarrierTakeMany.Options... options) { return BarrierTakeMany.create(scope, handle, numElements, componentTypes, options); } /** - * Batches all input tensors nondeterministically. - * When many instances of this Op are being run concurrently with the same - * container/shared_name in the same device, some will output zero-shaped Tensors - * and others will output Tensors of size up to max_batch_size. - *

All Tensors in in_tensors are batched together (so, for example, labels and - * features should be batched with a single instance of this operation. - *

Each invocation of batch emits an {@code id} scalar which will be used to identify - * this particular invocation when doing unbatch or its gradient. - *

Each op which emits a non-empty batch will also emit a non-empty batch_index - * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, - * start, and length of elements of each set of Tensors present in batched_tensors. - *

Batched tensors are concatenated along the first dimension, and all tensors in - * in_tensors must have the first dimension of the same size. - *

in_tensors: The tensors to be batched. - * num_batch_threads: Number of scheduling threads for processing batches of work. - * Determines the number of batches processed in parallel. - * max_batch_size: Batch sizes will never be bigger than this. - * batch_timeout_micros: Maximum number of microseconds to wait before outputting - * an incomplete batch. - * allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does - * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad - * batches up to one of those sizes. The entries must increase monotonically, and - * the final entry must equal max_batch_size. - * grad_timeout_micros: The timeout to use for the gradient. See Unbatch. - * batched_tensors: Either empty tensors or a batch of concatenated Tensors. - * batch_index: If out_tensors is non-empty, has information to invert it. - * container: Controls the scope of sharing of this batch. - * id: always contains a scalar with a unique ID for this invocation of Batch. - * shared_name: Concurrently running instances of batch in the same device with the - * same container and shared_name will batch their elements together. If left - * empty, the op name will be used as the shared name. - * T: the types of tensors to be batched. + * Batches all input tensors nondeterministically. When many instances of this Op are being run + * concurrently with the same container/shared_name in the same device, some will output + * zero-shaped Tensors and others will output Tensors of size up to max_batch_size. + * + *

All Tensors in in_tensors are batched together (so, for example, labels and features should + * be batched with a single instance of this operation. + * + *

Each invocation of batch emits an {@code id} scalar which will be used to identify this + * particular invocation when doing unbatch or its gradient. + * + *

Each op which emits a non-empty batch will also emit a non-empty batch_index Tensor, which, + * is a [K, 3] matrix where each row contains the invocation's id, start, and length of elements + * of each set of Tensors present in batched_tensors. + * + *

Batched tensors are concatenated along the first dimension, and all tensors in in_tensors + * must have the first dimension of the same size. + * + *

in_tensors: The tensors to be batched. num_batch_threads: Number of scheduling threads for + * processing batches of work. Determines the number of batches processed in parallel. + * max_batch_size: Batch sizes will never be bigger than this. batch_timeout_micros: Maximum + * number of microseconds to wait before outputting an incomplete batch. allowed_batch_sizes: + * Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list + * of batch sizes, causing the op to pad batches up to one of those sizes. The entries must + * increase monotonically, and the final entry must equal max_batch_size. grad_timeout_micros: The + * timeout to use for the gradient. See Unbatch. batched_tensors: Either empty tensors or a batch + * of concatenated Tensors. batch_index: If out_tensors is non-empty, has information to invert + * it. container: Controls the scope of sharing of this batch. id: always contains a scalar with a + * unique ID for this invocation of Batch. shared_name: Concurrently running instances of batch in + * the same device with the same container and shared_name will batch their elements together. If + * left empty, the op name will be used as the shared name. T: the types of tensors to be batched. * * @param inTensors the inTensors value * @param numBatchThreads the value of the numBatchThreads property @@ -779,15 +770,28 @@ public BarrierTakeMany barrierTakeMany(Operand handle, Operand * @param options carries optional attribute values * @return a new instance of Batch */ - public Batch batch(Iterable> inTensors, Long numBatchThreads, Long maxBatchSize, - Long batchTimeoutMicros, Long gradTimeoutMicros, Batch.Options... options) { - return Batch.create(scope, inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, gradTimeoutMicros, options); + public Batch batch( + Iterable> inTensors, + Long numBatchThreads, + Long maxBatchSize, + Long batchTimeoutMicros, + Long gradTimeoutMicros, + Batch.Options... options) { + return Batch.create( + scope, + inTensors, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + gradTimeoutMicros, + options); } /** - * Batches all the inputs tensors to the computation done by the function. - * So, for example, in the following code - *

+   * Batches all the inputs tensors to the computation done by the function. So, for example, in the
+   * following code
+   *
+   * <pre>
    *
    *  # This input will be captured.
    *  y = tf.placeholder_with_default(1.0, shape=[])
@@ -807,238 +811,258 @@ public Batch batch(Iterable> inTensors, Long numBatchThreads, Long ma
    *          allowed_batch_sizes=[3, 10],
    *          batching_queue="")
    *  
- *

If more than one session.run call is simultaneously trying to compute {@code b} - * the values of {@code a} will be gathered, non-deterministically concatenated - * along the first axis, and only one thread will run the computation. - *

Assumes that all arguments of the function are Tensors which will be batched - * along their first dimension. - *

Arguments that are captured, are not batched. The session.run call which does - * the concatenation, will use the values of the captured tensors available to it. - * Therefore, typical uses of captured tensors should involve values which remain - * unchanged across session.run calls. Inference is a good example of this. - *

SparseTensor is not supported. The return value of the decorated function - * must be a Tensor or a list/tuple of Tensors. + * + *

If more than one session.run call is simultaneously trying to compute {@code b} the values + * of {@code a} will be gathered, non-deterministically concatenated along the first axis, and + * only one thread will run the computation. + * + *

Assumes that all arguments of the function are Tensors which will be batched along their + * first dimension. + * + *

Arguments that are captured, are not batched. The session.run call which does the + * concatenation, will use the values of the captured tensors available to it. Therefore, typical + * uses of captured tensors should involve values which remain unchanged across session.run calls. + * Inference is a good example of this. + * + *

SparseTensor is not supported. The return value of the decorated function must be a Tensor + * or a list/tuple of Tensors. * * @param inTensors The tensors to be batched. - * @param capturedTensors The tensors which are captured in the function, and don't need - * to be batched. + * @param capturedTensors The tensors which are captured in the function, and don't need to be + * batched. * @param f the value of the f property - * @param numBatchThreads Number of scheduling threads for processing batches of work. - * Determines the number of batches processed in parallel. + * @param numBatchThreads Number of scheduling threads for processing batches of work. Determines + * the number of batches processed in parallel. * @param maxBatchSize Batch sizes will never be bigger than this. - * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting - * an incomplete batch. + * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting an + * incomplete batch. * @param Tout the types of the output tensors. * @param options carries optional attribute values * @return a new instance of BatchFunction */ - public BatchFunction batchFunction(Iterable> inTensors, - Iterable> capturedTensors, ConcreteFunction f, Long numBatchThreads, - Long maxBatchSize, Long batchTimeoutMicros, List> Tout, + public BatchFunction batchFunction( + Iterable> inTensors, + Iterable> capturedTensors, + ConcreteFunction f, + Long numBatchThreads, + Long maxBatchSize, + Long batchTimeoutMicros, + List> Tout, BatchFunction.Options... options) { - return BatchFunction.create(scope, inTensors, capturedTensors, f, numBatchThreads, maxBatchSize, batchTimeoutMicros, Tout, options); + return BatchFunction.create( + scope, + inTensors, + capturedTensors, + f, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + Tout, + options); } /** - * BatchToSpace for 4-D tensors of type T. - * This is a legacy version of the more general BatchToSpaceND. - *

Rearranges (permutes) data from batch into blocks of spatial data, followed by - * cropping. This is the reverse transformation of SpaceToBatch. More specifically, - * this op outputs a copy of the input tensor where values from the {@code batch} - * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, - * followed by cropping along the {@code height} and {@code width} dimensions. + * BatchToSpace for 4-D tensors of type T. This is a legacy version of the more general + * BatchToSpaceND. + * + *

Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping. + * This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy + * of the input tensor where values from the {@code batch} dimension are moved in spatial blocks + * to the {@code height} and {@code width} dimensions, followed by cropping along the {@code + * height} and {@code width} dimensions. * * @param data type for {@code output} output - * @param input 4-D tensor with shape - * {@code [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]}. Note that the batch size of the input tensor must be divisible by - * {@code block_size * block_size}. - * @param crops 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies - * how many elements to crop from the intermediate result across the spatial - * dimensions as follows: - *

+   * @param input 4-D tensor with shape {@code [batch*block_size*block_size, height_pad/block_size,
+   *     width_pad/block_size, depth]}. Note that the batch size of the input tensor must be
+   *     divisible by {@code block_size * block_size}.
+   * @param crops 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies how
+   *     many elements to crop from the intermediate result across the spatial dimensions as
+   *     follows:
+   *     <pre>
    *  crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
    *  
+ * * @param blockSize the value of the blockSize property * @param data type for {@code BatchToSpace} output and operands * @return a new instance of BatchToSpace */ - public BatchToSpace batchToSpace(Operand input, - Operand crops, Long blockSize) { + public BatchToSpace batchToSpace( + Operand input, Operand crops, Long blockSize) { return BatchToSpace.create(scope, input, crops, blockSize); } /** - * BatchToSpace for N-D tensors of type T. - * This operation reshapes the "batch" dimension 0 into {@code M + 1} dimensions of shape - * {@code block_shape + [batch]}, interleaves these blocks back into the grid defined by - * the spatial dimensions {@code [1, ..., M]}, to obtain a result with the same rank as - * the input. The spatial dimensions of this intermediate result are then - * optionally cropped according to {@code crops} to produce the output. This is the - * reverse of SpaceToBatch. See below for a precise description. + * BatchToSpace for N-D tensors of type T. This operation reshapes the "batch" dimension + * 0 into {@code M + 1} dimensions of shape {@code block_shape + [batch]}, interleaves these + * blocks back into the grid defined by the spatial dimensions {@code [1, ..., M]}, to obtain a + * result with the same rank as the input. The spatial dimensions of this intermediate result are + * then optionally cropped according to {@code crops} to produce the output. This is the reverse + * of SpaceToBatch. See below for a precise description. * * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, - * where spatial_shape has M dimensions. + * where spatial_shape has M dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. - * @param crops 2-D with shape {@code [M, 2]}, all values must be >= 0. - * {@code crops[i] = [crop_start, crop_end]} specifies the amount to crop from input - * dimension {@code i + 1}, which corresponds to spatial dimension {@code i}. It is - * required that - * {@code crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]}. - *

This operation is equivalent to the following steps: - *

    - *
  1. - *

    Reshape {@code input} to {@code reshaped} of shape: - * [block_shape[0], ..., block_shape[M-1], - * batch / prod(block_shape), - * input_shape[1], ..., input_shape[N-1]] - *

  2. - *
  3. - *

    Permute dimensions of {@code reshaped} to produce {@code permuted} of shape - * [batch / prod(block_shape), - *

    input_shape[1], block_shape[0], - * ..., - * input_shape[M], block_shape[M-1], - *

    input_shape[M+1], ..., input_shape[N-1]] - *

  4. - *
  5. - *

    Reshape {@code permuted} to produce {@code reshaped_permuted} of shape - * [batch / prod(block_shape), - *

    input_shape[1] * block_shape[0], - * ..., - * input_shape[M] * block_shape[M-1], - *

    input_shape[M+1], - * ..., - * input_shape[N-1]] - *

  6. - *
  7. - *

    Crop the start and end of dimensions {@code [1, ..., M]} of - * {@code reshaped_permuted} according to {@code crops} to produce the output of shape: - * [batch / prod(block_shape), - *

    input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], - * ..., - * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - *

    input_shape[M+1], ..., input_shape[N-1]] - *

  8. - *
- *

Some examples: - *

(1) For the following input of shape {@code [4, 1, 1, 1]}, {@code block_shape = [2, 2]}, and - * {@code crops = [[0, 0], [0, 0]]}: - *

+   * @param crops 2-D with shape {@code [M, 2]}, all values must be >= 0. {@code crops[i] =
+   *     [crop_start, crop_end]} specifies the amount to crop from input dimension {@code i + 1},
+   *     which corresponds to spatial dimension {@code i}. It is required that {@code crop_start[i]
+   *     + crop_end[i] <= block_shape[i] * input_shape[i + 1]}.
+   *     

This operation is equivalent to the following steps: + *

    + *
  1. + *

    Reshape {@code input} to {@code reshaped} of shape: [block_shape[0], ..., + * block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]] + *

  2. + *

    Permute dimensions of {@code reshaped} to produce {@code permuted} of shape [batch + * / prod(block_shape), + *

    input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1], + *

    input_shape[M+1], ..., input_shape[N-1]] + *

  3. + *

    Reshape {@code permuted} to produce {@code reshaped_permuted} of shape [batch / + * prod(block_shape), + *

    input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1], + *

    input_shape[M+1], ..., input_shape[N-1]] + *

  4. + *

    Crop the start and end of dimensions {@code [1, ..., M]} of {@code + * reshaped_permuted} according to {@code crops} to produce the output of shape: [batch + * / prod(block_shape), + *

    input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * + * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + *

    input_shape[M+1], ..., input_shape[N-1]] + *

+ *

Some examples: + *

(1) For the following input of shape {@code [4, 1, 1, 1]}, {@code block_shape = [2, 2]}, + * and {@code crops = [[0, 0], [0, 0]]}: + *

    *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    *  
- *

The output tensor has shape {@code [1, 2, 2, 1]} and value: - *

+   *     

The output tensor has shape {@code [1, 2, 2, 1]} and value: + *

    *  x = [[[[1], [2]], [[3], [4]]]]
    *  
- *

(2) For the following input of shape {@code [4, 1, 1, 3]}, {@code block_shape = [2, 2]}, and - * {@code crops = [[0, 0], [0, 0]]}: - *

+   *     

(2) For the following input of shape {@code [4, 1, 1, 3]}, {@code block_shape = [2, 2]}, + * and {@code crops = [[0, 0], [0, 0]]}: + *

    *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
    *  
- *

The output tensor has shape {@code [1, 2, 2, 3]} and value: - *

+   *     

The output tensor has shape {@code [1, 2, 2, 3]} and value: + *

    *  x = [[[[1, 2, 3], [4, 5, 6]],
    *        [[7, 8, 9], [10, 11, 12]]]]
    *  
- *

(3) For the following input of shape {@code [4, 2, 2, 1]}, {@code block_shape = [2, 2]}, and - * {@code crops = [[0, 0], [0, 0]]}: - *

+   *     

(3) For the following input of shape {@code [4, 2, 2, 1]}, {@code block_shape = [2, 2]}, + * and {@code crops = [[0, 0], [0, 0]]}: + *

    *  x = [[[[1], [3]], [[9], [11]]],
    *       [[[2], [4]], [[10], [12]]],
    *       [[[5], [7]], [[13], [15]]],
    *       [[[6], [8]], [[14], [16]]]]
    *  
- *

The output tensor has shape {@code [1, 4, 4, 1]} and value: - *

+   *     

The output tensor has shape {@code [1, 4, 4, 1]} and value: + *

    *  x = [[[[1],   [2],  [3],  [4]],
    *       [[5],   [6],  [7],  [8]],
    *       [[9],  [10], [11],  [12]],
    *       [[13], [14], [15],  [16]]]]
    *  
- *

(4) For the following input of shape {@code [8, 1, 3, 1]}, {@code block_shape = [2, 2]}, and - * {@code crops = [[0, 0], [2, 0]]}: - *

+   *     

(4) For the following input of shape {@code [8, 1, 3, 1]}, {@code block_shape = [2, 2]}, + * and {@code crops = [[0, 0], [2, 0]]}: + *

    *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
    *       [[[0], [2], [4]]], [[[0], [10], [12]]],
    *       [[[0], [5], [7]]], [[[0], [13], [15]]],
    *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
    *  
- *

The output tensor has shape {@code [2, 2, 4, 1]} and value: - *

+   *     

The output tensor has shape {@code [2, 2, 4, 1]} and value: + *

    *  x = [[[[1],   [2],  [3],  [4]],
    *        [[5],   [6],  [7],  [8]]],
    *       [[[9],  [10], [11],  [12]],
    *        [[13], [14], [15],  [16]]]]
    *  
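The four worked examples above carry over directly to the Java API. A minimal eager-mode sketch of example (1), under the assumption that the shapes behave as documented (class name hypothetical):

```java
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class BatchToSpaceNdExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    // Example (1): input shape [4, 1, 1, 1], block_shape = [2, 2], crops = [[0, 0], [0, 0]].
    Operand<TFloat32> input =
        tf.constant(new float[][][][] {{{{1f}}}, {{{2f}}}, {{{3f}}}, {{{4f}}}});
    Operand<TInt32> blockShape = tf.constant(new int[] {2, 2});
    Operand<TInt32> crops = tf.constant(new int[][] {{0, 0}, {0, 0}});
    // Expected result: shape [1, 2, 2, 1] with value [[[[1], [2]], [[3], [4]]]].
    Operand<TFloat32> output = tf.batchToSpaceNd(input, blockShape, crops);
    System.out.println(output.shape()); // [1, 2, 2, 1]
  }
}
```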
+ * * @param data type for {@code BatchToSpaceND} output and operands * @return a new instance of BatchToSpaceNd */ - public BatchToSpaceNd batchToSpaceNd(Operand input, - Operand blockShape, Operand crops) { + public BatchToSpaceNd batchToSpaceNd( + Operand input, Operand blockShape, Operand crops) { return BatchToSpaceNd.create(scope, input, blockShape, crops); } /** - * Bitcasts a tensor from one type to another without copying data. - * Given a tensor {@code input}, this operation returns a tensor that has the same buffer - * data as {@code input} with datatype {@code type}. - *

If the input datatype {@code T} is larger than the output datatype {@code type} then the - * shape changes from [...] to [..., sizeof({@code T})/sizeof({@code type})]. - *

If {@code T} is smaller than {@code type}, the operator requires that the rightmost - * dimension be equal to sizeof({@code type})/sizeof({@code T}). The shape then goes from - * [..., sizeof({@code type})/sizeof({@code T})] to [...]. - *

tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype - * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() - * gives module error. - * For example, - *

Example 1: - *

- *
- *
- *

a = [1., 2., 3.] - * equality_bitcast = tf.bitcast(a, tf.complex128) - * Traceback (most recent call last): - * ... - * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] - * equality_cast = tf.cast(a, tf.complex128) - * print(equality_cast) - * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - *

- *
- *
- *

Example 2: - *

- *
- *
- *

tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) - * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)> - *

- *
- *
- *

Example 3: - *

- *
- *
- *

x = [1., 2., 3.] - * y = [0., 2., 3.] - * equality= tf.equal(x,y) - * equality_cast = tf.cast(equality,tf.float32) - * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) - * print(equality) - * tf.Tensor([False True True], shape=(3,), dtype=bool) - * print(equality_cast) - * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) - * print(equality_bitcast) - * tf.Tensor( - * [[ 0 0 0 0] - * [ 0 0 128 63] - * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - *

- *
- *
- *

NOTE: Bitcast is implemented as a low-level cast, so machines with different - * endian orderings will give different results. + * Bitcasts a tensor from one type to another without copying data. Given a tensor {@code input}, + * this operation returns a tensor that has the same buffer data as {@code input} with datatype + * {@code type}. + * + *

If the input datatype {@code T} is larger than the output datatype {@code type} then the + * shape changes from [...] to [..., sizeof({@code T})/sizeof({@code type})]. + * + *

If {@code T} is smaller than {@code type}, the operator requires that the rightmost + * dimension be equal to sizeof({@code type})/sizeof({@code T}). The shape then goes from [..., + * sizeof({@code type})/sizeof({@code T})] to [...]. + * + *

tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + * gives module error. For example, + * + *

Example 1: + * + *

+ * + *
+ * + *
+ * + *

a = [1., 2., 3.] equality_bitcast = tf.bitcast(a, tf.complex128) Traceback (most recent call + * last): ... InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] equality_cast = + * tf.cast(a, tf.complex128) print(equality_cast) tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), + * dtype=complex128) + * + *

+ * + *
+ * + *
+ * + *

Example 2: + * + *

+ * + *
+ * + *
+ * + *

tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) <tf.Tensor: shape=(4,), + * dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)> + * + *

+ * + *
+ * + *
+ * + *

Example 3: + * + *

+ * + *
+ * + *
+ * + *

x = [1., 2., 3.] y = [0., 2., 3.] equality= tf.equal(x,y) equality_cast = + * tf.cast(equality,tf.float32) equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * print(equality) tf.Tensor([False True True], shape=(3,), dtype=bool) print(equality_cast) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) print(equality_bitcast) tf.Tensor( [[ 0 0 0 0] + * [ 0 0 128 63] [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * + *
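Example 2 above translates to the Java API roughly as follows; a sketch, assuming eager execution (class name hypothetical):

```java
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;
import org.tensorflow.types.TUint8;

public class BitcastExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    // -1 is 0xffffffff as an int32; bitcasting to uint8 reinterprets its 4 bytes,
    // so the shape grows from [] to [4] (sizeof(int32)/sizeof(uint8) == 4).
    Operand<TInt32> x = tf.constant(-1);
    Operand<TUint8> y = tf.bitcast(x, TUint8.class);
    System.out.println(y.shape()); // [4], values [255, 255, 255, 255]
  }
}
```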

+ * + *
+ * + *
+ * + *

NOTE: Bitcast is implemented as a low-level cast, so machines with different endian + * orderings will give different results. * * @param data type for {@code output} output * @param input the input value @@ -1051,47 +1075,49 @@ public Bitcast bitcast(Operand input, Clas } /** - * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a {@code true} in the mask. - *

- * Numpy equivalent is {@code tensor[mask]}. - *

- * In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match - * the first K dimensions of {@code tensor}'s shape. We then have: - * {@code booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]} - * where {@code (i1,...,iK)} is the ith {@code true} entry of {@code mask} (row-major order). - *

- * The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 by default). - * In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match - * the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. + * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a {@code + * true} in the mask. + * + *

Numpy equivalent is {@code tensor[mask]}. + * + *

In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match + * the first K dimensions of {@code tensor}'s shape. We then have: {@code booleanMask(tensor, + * mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code + * true} entry of {@code mask} (row-major order). + * + *

The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 + * by default). In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape + * must match the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. * * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values * @return The masked tensor. */ - public Operand booleanMask(Operand tensor, Operand mask, - BooleanMask.Options... options) { + public Operand booleanMask( + Operand tensor, Operand mask, BooleanMask.Options... options) { return BooleanMask.create(scope, tensor, mask, options); } /** - * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the input tensors. {@code - * updates} will be broadcasted by default - *
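A minimal sketch of the booleanMask call documented above, using the signature shown in this diff (eager mode; class name hypothetical):

```java
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TBool;
import org.tensorflow.types.TInt32;

public class BooleanMaskExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    Operand<TInt32> tensor = tf.constant(new int[] {1, 2, 3, 4});
    Operand<TBool> mask = tf.constant(new boolean[] {true, false, true, false});
    // Keeps the elements where mask is true, analogous to NumPy's tensor[mask]: [1, 3].
    Operand<TInt32> masked = tf.booleanMask(tensor, mask);
    System.out.println(masked.shape()); // [2]
  }
}
```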

- * Numpy equivalent is `tensor[mask] = updates`. - *

- * In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match the first K dimensions of - * {@code tensor}'s shape. We then have: {@code booleanMask(tensor, mask)[i, j1,...,jd] = - * tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code true} entry of {@code mask} (row-major - * order). - *

- * The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 by default). In that - * case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match the first {@code axis + - * dim(mask)} dimensions of {@code tensor}'s shape. - *

- * The shape of {@code updates} should be {@code [n, t_1, t_2, ...]} where {@code n} is the number of true values in - * {@code mask} and {@code t_i} is the {@code i}th dimension of {@code tensor} after {@code axis} and {@code mask}. - * {@code updates} will be broadcasted to this shape by default, which can be disabled using {@code options}. + * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the + * input tensors. {@code updates} will be broadcasted by default + * + *

Numpy equivalent is `tensor[mask] = updates`. + * + *

In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match + * the first K dimensions of {@code tensor}'s shape. We then have: {@code booleanMask(tensor, + * mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code + * true} entry of {@code mask} (row-major order). + * + *

The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 + * by default). In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape + * must match the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. + * + *

The shape of {@code updates} should be {@code [n, t_1, t_2, ...]} where {@code n} is the + * number of true values in {@code mask} and {@code t_i} is the {@code i}th dimension of {@code + * tensor} after {@code axis} and {@code mask}. {@code updates} will be broadcasted to this shape + * by default, which can be disabled using {@code options}. * * @param tensor The tensor to mask. * @param mask The mask to apply. @@ -1099,15 +1125,18 @@ public Operand booleanMask(Operand tensor, Operand Operand booleanMaskUpdate(Operand tensor, Operand mask, - Operand updates, BooleanMaskUpdate.Options... options) { + public Operand booleanMaskUpdate( + Operand tensor, + Operand mask, + Operand updates, + BooleanMaskUpdate.Options... options) { return BooleanMaskUpdate.create(scope, tensor, mask, updates, options); } /** - * Return the shape of s0 op s1 with broadcast. - * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the - * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. + * Return the shape of s0 op s1 with broadcast. Given {@code s0} and {@code s1}, tensors that + * represent shapes, compute {@code r0}, the broadcasted shape. {@code s0}, {@code s1} and {@code + * r0} are all integer vectors. * * @param data type for {@code r0} output * @param s0 the s0 value @@ -1115,41 +1144,43 @@ public Operand booleanMaskUpdate(Operand tensor, Operand * @param data type for {@code BroadcastArgs} output and operands * @return a new instance of BroadcastDynamicShape */ - public BroadcastDynamicShape broadcastDynamicShape(Operand s0, - Operand s1) { + public BroadcastDynamicShape broadcastDynamicShape( + Operand s0, Operand s1) { return BroadcastDynamicShape.create(scope, s0, s1); } /** - * Broadcast an array for a compatible shape. - * Broadcasting is the process of making arrays to have compatible shapes - * for arithmetic operations. Two shapes are compatible if for each - * dimension pair they are either equal or one of them is one. When trying - * to broadcast a Tensor to a shape, it starts with the trailing dimensions, - * and works its way forward. - *

For example, - *

- *
- *
- *

x = tf.constant([1, 2, 3]) - * y = tf.broadcast_to(x, [3, 3]) - * print(y) - * tf.Tensor( - * [[1 2 3] - * [1 2 3] - * [1 2 3]], shape=(3, 3), dtype=int32) - *

- *
- *
- *

In the above example, the input Tensor with the shape of {@code [1, 3]} - * is broadcasted to output Tensor with shape of {@code [3, 3]}. - *

When doing broadcasted operations such as multiplying a tensor - * by a scalar, broadcasting (usually) confers some time or space - * benefit, as the broadcasted tensor is never materialized. - *

However, {@code broadcast_to} does not carry with it any such benefits. - * The newly-created tensor takes the full memory of the broadcasted - * shape. (In a graph context, {@code broadcast_to} might be fused to - * subsequent operation and then be optimized away, however.) + * Broadcast an array for a compatible shape. Broadcasting is the process of making arrays to have + * compatible shapes for arithmetic operations. Two shapes are compatible if for each dimension + * pair they are either equal or one of them is one. When trying to broadcast a Tensor to a shape, + * it starts with the trailing dimensions, and works its way forward. + * + *

For example, + * + *

+ * + *
+ * + *
+ * + *

x = tf.constant([1, 2, 3]) y = tf.broadcast_to(x, [3, 3]) print(y) tf.Tensor( [[1 2 3] [1 2 + * 3] [1 2 3]], shape=(3, 3), dtype=int32) + * + *
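The Python snippet above has a direct Java counterpart; a sketch, assuming the broadcastTo signature shown later in this diff (class name hypothetical):

```java
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

public class BroadcastToExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();
    Operand<TInt32> x = tf.constant(new int[] {1, 2, 3});
    // Broadcast shape [3] -> [3, 3]; every row repeats x, matching the example above.
    Operand<TInt32> y = tf.broadcastTo(x, tf.constant(new long[] {3, 3}));
    System.out.println(y.shape()); // [3, 3]
  }
}
```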

+ * + *
+ * + *
+ * + *

In the above example, the input Tensor with the shape of {@code [1, 3]} is broadcasted to + * output Tensor with shape of {@code [3, 3]}. + * + *

When doing broadcasted operations such as multiplying a tensor by a scalar, broadcasting + * (usually) confers some time or space benefit, as the broadcasted tensor is never materialized. + * + *

However, {@code broadcast_to} does not carry with it any such benefits. The newly-created + * tensor takes the full memory of the broadcasted shape. (In a graph context, {@code + * broadcast_to} might be fused to subsequent operation and then be optimized away, however.) * * @param data type for {@code output} output * @param input A Tensor to broadcast. @@ -1157,22 +1188,16 @@ public BroadcastDynamicShape broadcastDynamicShape(Operan * @param data type for {@code BroadcastTo} output and operands * @return a new instance of BroadcastTo */ - public BroadcastTo broadcastTo(Operand input, - Operand shape) { + public BroadcastTo broadcastTo( + Operand input, Operand shape) { return BroadcastTo.create(scope, input, shape); } /** - * Bucketizes 'input' based on 'boundaries'. - * For example, if the inputs are - * boundaries = [0, 10, 100] - * input = [[-5, 10000] - * [150, 10] - * [5, 100]] - *

then the output will be - * output = [[0, 3] - * [3, 2] - * [1, 3]] + * Bucketizes 'input' based on 'boundaries'. For example, if the inputs are boundaries = [0, 10, + * 100] input = [[-5, 10000] [150, 10] [5, 100]] + * + *

then the output will be output = [[0, 3] [3, 2] [1, 3]] * * @param input Any shape of Tensor contains with int or float type. * @param boundaries A sorted list of floats gives the boundary of the buckets. @@ -1184,7 +1209,7 @@ public Bucketize bucketize(Operand input, List boundar /** * Calls the function in an execution environment, adding its graph as a function if it isn't - * already present. Only works for functions with a single input and output. + * already present. Only works for functions with a single input and output. * * @param argument the argument to the call * @return the output of the function @@ -1196,20 +1221,21 @@ public Operand call(ConcreteFunction function, Operand argument) { /** * Calls the function in an execution environment, adding its graph as a function if it isn't - * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. + * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. * * @param arguments the arguments to the call * @return the outputs of the function * @see ConcreteFunction#call(Ops, Map) */ - public Map> call(ConcreteFunction function, - Map> arguments) { + public Map> call( + ConcreteFunction function, Map> arguments) { return Function.call(scope, function, arguments); } /** * An n-way switch statement which calls a single branch function. - *
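As a sketch of the single-input call documented just above, assuming the Signature-builder style of constructing a ConcreteFunction from org.tensorflow (class name hypothetical):

```java
import org.tensorflow.ConcreteFunction;
import org.tensorflow.Operand;
import org.tensorflow.Signature;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Placeholder;
import org.tensorflow.types.TFloat32;

public class CallExample {
  public static void main(String[] args) {
    // Build a single-input, single-output function computing y = x + 1.
    ConcreteFunction addOne =
        ConcreteFunction.create(
            tf -> {
              Placeholder<TFloat32> x = tf.placeholder(TFloat32.class);
              Operand<TFloat32> y = tf.math.add(x, tf.constant(1.0f));
              return Signature.builder().input("x", x).output("y", y).build();
            });

    Ops tf = Ops.create();
    // call() adds the function's graph to the execution environment and invokes it.
    Operand<TFloat32> result = tf.call(addOne, tf.constant(2.0f));
    System.out.println(result.shape()); // scalar shape []
  }
}
```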

+   *
+   * <pre>
    *  An n-way switch statement, implementing the following:
    *  ```
    *  switch (branch_index) {
@@ -1228,41 +1254,48 @@ public Map> call(ConcreteFunction function,
    *  ```
    *  
* - *

Selects between {@link StatefulCase} and {@link StatelessCase} based on the statefulness of the function arguments. + *

Selects between {@link StatefulCase} and {@link StatelessCase} based on the statefulness of + * the function arguments. * * @param branchIndex The branch selector, an int32 Tensor. * @param input A list of input tensors passed to the branch function. * @param Tout A list of output types. - * @param branches

+   * @param branches
+   *     <pre>
    *    A list of functions each of which takes 'inputs' and returns a list of
    *    tensors, whose types are the same as what every other branch returns.
    *  
+ * * @param options carries optional attribute values * @return a new instance of Case */ - public Case caseOp(Operand branchIndex, Iterable> input, - List> Tout, List branches, Case.Options... options) { + public Case caseOp( + Operand branchIndex, + Iterable> input, + List> Tout, + List branches, + Case.Options... options) { return Case.create(scope, branchIndex, input, Tout, branches, options); } /** - * Clips tensor values to a specified min and max. - * Given a tensor {@code t}, this operation returns a tensor of the same type and - * shape as {@code t} with its values clipped to {@code clip_value_min} and {@code clip_value_max}. - * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values - * greater than {@code clip_value_max} are set to {@code clip_value_max}. + * Clips tensor values to a specified min and max. Given a tensor {@code t}, this operation + * returns a tensor of the same type and shape as {@code t} with its values clipped to {@code + * clip_value_min} and {@code clip_value_max}. Any values less than {@code clip_value_min} are set + * to {@code clip_value_min}. Any values greater than {@code clip_value_max} are set to {@code + * clip_value_max}. * * @param data type for {@code output} output * @param t A {@code Tensor}. - * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape - * as {@code t}. The minimum value to clip by. - * @param clipValueMax A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape - * as {@code t}. The maximum value to clip by. + * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape as + * {@code t}. The minimum value to clip by. + * @param clipValueMax A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape as + * {@code t}. The maximum value to clip by. * @param data type for {@code ClipByValue} output and operands * @return a new instance of ClipByValue */ - public ClipByValue clipByValue(Operand t, Operand clipValueMin, - Operand clipValueMax) { + public ClipByValue clipByValue( + Operand t, Operand clipValueMin, Operand clipValueMax) { return ClipByValue.create(scope, t, clipValueMin, clipValueMax); } @@ -1270,15 +1303,15 @@ public ClipByValue clipByValue(Operand t, Operand cli * Concatenates tensors along one dimension. * * @param data type for {@code output} output - * @param values List of {@code N} Tensors to concatenate. Their ranks and types must match, - * and their sizes must match in all dimensions except {@code concat_dim}. - * @param axis 0-D. The dimension along which to concatenate. Must be in the - * range [-rank(values), rank(values)). + * @param values List of {@code N} Tensors to concatenate. Their ranks and types must match, and + * their sizes must match in all dimensions except {@code concat_dim}. + * @param axis 0-D. The dimension along which to concatenate. Must be in the range [-rank(values), + * rank(values)). * @param data type for {@code ConcatV2} output and operands * @return a new instance of Concat */ - public Concat concat(Iterable> values, - Operand axis) { + public Concat concat( + Iterable> values, Operand axis) { return Concat.create(scope, values, axis); } @@ -1296,7 +1329,7 @@ public Constant constant(int data) { * Creates a rank-3 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. 
* @return a double constant */ public Constant constant(double[][][] data) { @@ -1307,7 +1340,7 @@ public Constant constant(double[][][] data) { * Creates a rank-5 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a byte constant */ public Constant constant(byte[][][][][] data) { @@ -1316,7 +1349,7 @@ public Constant constant(byte[][][][][] data) { /** * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * using the default UTF-8 encoding. * * @param data an n-dimensional array of {@code String} elements. * @return a string constant @@ -1329,7 +1362,7 @@ public Constant constant(NdArray data) { * Creates a rank-4 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[][][][] data) { @@ -1350,7 +1383,7 @@ public Constant constant(byte data) { * Creates a rank-2 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[][] data) { @@ -1361,7 +1394,7 @@ public Constant constant(long[][] data) { * Creates a rank-6 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a float constant */ public Constant constant(float[][][][][][] data) { @@ -1372,7 +1405,7 @@ public Constant constant(float[][][][][][] data) { * Creates a rank-6 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[][][][][][] data) { @@ -1383,7 +1416,7 @@ public Constant constant(boolean[][][][][][] data) { * Creates a rank-4 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[][][][] data) { @@ -1394,7 +1427,7 @@ public Constant constant(boolean[][][][] data) { * Creates a rank-3 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a float constant */ public Constant constant(float[][][] data) { @@ -1405,7 +1438,7 @@ public Constant constant(float[][][] data) { * Creates a rank-5 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. 
* @return a float constant */ public Constant constant(float[][][][][] data) { @@ -1416,7 +1449,7 @@ public Constant constant(float[][][][][] data) { * Creates a rank-5 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[][][][][] data) { @@ -1427,7 +1460,7 @@ public Constant constant(long[][][][][] data) { * Creates a rank-1 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[] data) { @@ -1438,7 +1471,7 @@ public Constant constant(int[] data) { * Creates a rank-2 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a float constant */ public Constant constant(float[][] data) { @@ -1449,7 +1482,7 @@ public Constant constant(float[][] data) { * Creates a rank-2 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[][] data) { @@ -1510,7 +1543,7 @@ public Constant constant(BooleanNdArray data) { * Creates a rank-1 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a double constant */ public Constant constant(double[] data) { @@ -1531,7 +1564,7 @@ public Constant constant(LongNdArray data) { * Creates a rank-1 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a float constant */ public Constant constant(float[] data) { @@ -1542,7 +1575,7 @@ public Constant constant(float[] data) { * Creates a rank-3 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[][][] data) { @@ -1553,7 +1586,7 @@ public Constant constant(long[][][] data) { * Creates a rank-3 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[][][] data) { @@ -1564,7 +1597,7 @@ public Constant constant(boolean[][][] data) { * Creates a rank-1 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. 
* @return a byte constant */ public Constant constant(byte[] data) { @@ -1575,7 +1608,7 @@ public Constant constant(byte[] data) { * Creates a rank-3 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[][][] data) { @@ -1596,7 +1629,7 @@ public Constant constant(IntNdArray data) { * Creates a rank-1 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[] data) { @@ -1617,7 +1650,7 @@ public Constant constant(FloatNdArray data) { * Creates a rank-5 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[][][][][] data) { @@ -1628,7 +1661,7 @@ public Constant constant(int[][][][][] data) { * Creates a rank-5 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a double constant */ public Constant constant(double[][][][][] data) { @@ -1639,7 +1672,7 @@ public Constant constant(double[][][][][] data) { * Creates a rank-5 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[][][][][] data) { @@ -1650,7 +1683,7 @@ public Constant constant(boolean[][][][][] data) { * Creates a rank-6 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[][][][][][] data) { @@ -1671,7 +1704,7 @@ public Constant constant(DoubleNdArray data) { * Creates a rank-6 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a double constant */ public Constant constant(double[][][][][][] data) { @@ -1682,7 +1715,7 @@ public Constant constant(double[][][][][][] data) { * Creates a rank-6 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[][][][][][] data) { @@ -1693,7 +1726,7 @@ public Constant constant(long[][][][][][] data) { * Creates a rank-2 constant of {@code int} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. 
+ * new constant will match those of the array. * @return an integer constant */ public Constant constant(int[][] data) { @@ -1704,7 +1737,7 @@ public Constant constant(int[][] data) { * Creates a rank-1 constant of {@code boolean} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a boolean constant */ public Constant constant(boolean[] data) { @@ -1725,7 +1758,7 @@ public Constant constant(float data) { * Creates a rank-4 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a byte constant */ public Constant constant(byte[][][][] data) { @@ -1736,7 +1769,7 @@ public Constant constant(byte[][][][] data) { * Creates a rank-4 constant of {@code float} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a float constant */ public Constant constant(float[][][][] data) { @@ -1757,7 +1790,7 @@ public Constant constant(ByteNdArray data) { * Creates a rank-6 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a byte constant */ public Constant constant(byte[][][][][][] data) { @@ -1768,7 +1801,7 @@ public Constant constant(byte[][][][][][] data) { * Creates a rank-4 constant of {@code long} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a long constant */ public Constant constant(long[][][][] data) { @@ -1779,7 +1812,7 @@ public Constant constant(long[][][][] data) { * Creates a rank-2 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a byte constant */ public Constant constant(byte[][] data) { @@ -1790,7 +1823,7 @@ public Constant constant(byte[][] data) { * Creates a rank-2 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a double constant */ public Constant constant(double[][] data) { @@ -1801,7 +1834,7 @@ public Constant constant(double[][] data) { * Creates a rank-3 constant of {@code byte} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * new constant will match those of the array. * @return a byte constant */ public Constant constant(byte[][][] data) { @@ -1812,7 +1845,7 @@ public Constant constant(byte[][][] data) { * Creates a rank-4 constant of {@code double} elements. * * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. 
+ * new constant will match those of the array. * @return a double constant */ public Constant constant(double[][][][] data) { @@ -1821,7 +1854,7 @@ public Constant constant(double[][][][] data) { /** * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of - * the given shape. + * the given shape. * * @param shape a shape * @return a long constant @@ -1832,7 +1865,7 @@ public Constant constant(Shape shape) { /** * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the given encoding. + * using the given encoding. * * @param charset charset used to encode/decode string bytes. * @param data an n-dimensional array of {@code String} elements. @@ -1847,7 +1880,7 @@ public Constant constant(Charset charset, NdArray data) { * * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * sequences of bytes from the last array dimension. * @return the {@code String} constant */ public Constant constant(Charset charset, String[] data) { @@ -1879,7 +1912,7 @@ public Constant constant(Shape shape, BooleanDataBuffer data) { /** * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 - * encoding. + * encoding. * * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -1952,14 +1985,14 @@ public Constant constant(Shape shape, FloatDataBuffer data) { /** * Creates a scalar of {@code type}, with the value of {@code number}. {@code number} may be - * truncated if it does not fit in the target type. + * truncated if it does not fit in the target type. * * @param type the type of tensor to create. Must be concrete (i.e. not {@link - * org.tensorflow.types.family.TFloating}) + * org.tensorflow.types.family.TFloating}) * @param number the value of the tensor * @return a constant of the passed type * @throws IllegalArgumentException if the type is abstract (i.e. {@link - * org.tensorflow.types.family.TFloating}) or unknown. + * org.tensorflow.types.family.TFloating}) or unknown. */ public Constant constant(Class type, Number number) { return Constant.tensorOf(scope, type, number); @@ -1987,7 +2020,7 @@ public Constant constant(Charset charset, Shape shape, DataBuffer Constant constant(Class type, Shape shape, ByteDataBuffer data) { return Constant.tensorOf(scope, type, shape, data); @@ -1995,11 +2028,11 @@ public Constant constant(Class type, Shape shape, ByteDa /** * Create a constant by making an immutable copy of {@code tensor}. {@code tensor} may be closed - * afterwards without issue. + * afterwards without issue. * - *

Note: this endpoint cannot be simply called {@code constant} since it will conflict with - * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, - * FloatNdArray)}}. + *

Note: this endpoint cannot be simply called {@code constant} since it will conflict with + * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, + * FloatNdArray)}}. * * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` @@ -2010,7 +2043,7 @@ public Constant constantOf(T tensor) { /** * Creates a scalar of the same type as {@code toMatch}, with the value of {@code number}. {@code - * number} may be truncated if it does not fit in the target type. + * number} may be truncated if it does not fit in the target type. * * @param toMatch the operand providing the target type * @param number the value of the tensor @@ -2023,13 +2056,13 @@ public Constant constantOfSameType(Operand toMatch, Nu } /** - * This op consumes a lock created by {@code MutexLock}. - * This op exists to consume a tensor created by {@code MutexLock} (other than - * direct control dependencies). It should be the only that consumes the tensor, - * and will raise an error if it is not. Its only purpose is to keep the - * mutex lock tensor alive until it is consumed by this op. - *

NOTE: This operation must run on the same device as its input. This may - * be enforced via the {@code colocate_with} mechanism. + * This op consumes a lock created by {@code MutexLock}. This op exists to consume a tensor + * created by {@code MutexLock} (other than direct control dependencies). It should be the only op + * that consumes the tensor, and will raise an error if it is not. Its only purpose is to keep the + * mutex lock tensor alive until it is consumed by this op. + * + *

NOTE: This operation must run on the same device as its input. This may be + * enforced via the {@code colocate_with} mechanism. * * @param mutexLock A tensor returned by {@code MutexLock}. * @return a new instance of ConsumeMutexLock @@ -2039,8 +2072,8 @@ public ConsumeMutexLock consumeMutexLock(Operand mutexLock) { } /** - * Does nothing. Serves as a control trigger for scheduling. - * Only useful as a placeholder for control edges. + * Does nothing. Serves as a control trigger for scheduling. Only useful as a placeholder for + * control edges. * * @return a new instance of ControlTrigger */ @@ -2053,8 +2086,8 @@ public ControlTrigger controlTrigger() { * * @param data type for {@code output} output * @param ref Should be from a scalar {@code Variable} node. - * @param limit If incrementing ref would bring it above limit, instead generates an - * 'OutOfRange' error. + * @param limit If incrementing ref would bring it above limit, instead generates an 'OutOfRange' + * error. * @param data type for {@code CountUpTo} output and operands * @return a new instance of CountUpTo */ @@ -2063,69 +2096,72 @@ public CountUpTo countUpTo(Operand ref, Long limit) { } /** - * The op extracts fields from a serialized protocol buffers message into tensors. - * The {@code decode_proto} op extracts fields from a serialized protocol buffers - * message into tensors. The fields in {@code field_names} are decoded and converted - * to the corresponding {@code output_types} if possible. - *

A {@code message_type} name must be provided to give context for the field names. - * The actual message descriptor can be looked up either in the linked-in - * descriptor pool or a filename provided by the caller using the - * {@code descriptor_source} attribute. - *

Each output tensor is a dense tensor. This means that it is padded to hold - * the largest number of repeated elements seen in the input minibatch. (The - * shape is also padded by one to prevent zero-sized dimensions). The actual - * repeat counts for each example in the minibatch can be found in the {@code sizes} - * output. In many cases the output of {@code decode_proto} is fed immediately into - * tf.squeeze if missing values are not a concern. When using tf.squeeze, always - * pass the squeeze dimension explicitly to avoid surprises. - *

For the most part, the mapping between Proto field types and TensorFlow dtypes - * is straightforward. However, there are a few special cases: - *

  • A proto field that contains a submessage or group can only be converted - * to {@code DT_STRING} (the serialized submessage). This is to reduce the complexity - * of the API. The resulting string can be used as input to another instance of - * the decode_proto op. - *
  • TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a {@code DT_INT64} with the same twos-complement bit pattern (the obvious - * way). Unsigned int32 values can be represented exactly by specifying type - * {@code DT_INT64}, or using twos-complement if the caller specifies {@code DT_INT32} in - * the {@code output_types} attribute. - *

Both binary and text proto serializations are supported, and can be - * chosen using the {@code format} attribute. - *

The {@code descriptor_source} attribute selects the source of protocol - * descriptors to consult when looking up {@code message_type}. This may be: - *

  • An empty string or "local://", in which case protocol descriptors are - * created for C++ (not Python) proto definitions linked to the binary. - *
  • A file, in which case protocol descriptors are created from the file, - * which is expected to contain a {@code FileDescriptorSet} serialized as a string. - * NOTE: You can build a {@code descriptor_source} file using the {@code --descriptor_set_out} - * and {@code --include_imports} options to the protocol compiler {@code protoc}. - *
  • A "bytes://&lt;bytes&gt;", in which protocol descriptors are created from {@code <bytes>}, - * which is expected to be a {@code FileDescriptorSet} serialized as a string. - *
+ * The op extracts fields from a serialized protocol buffers message into tensors. The {@code + * decode_proto} op extracts fields from a serialized protocol buffers message into tensors. The + * fields in {@code field_names} are decoded and converted to the corresponding {@code + * output_types} if possible. + * + *

A {@code message_type} name must be provided to give context for the field names. The actual + * message descriptor can be looked up either in the linked-in descriptor pool or a filename + * provided by the caller using the {@code descriptor_source} attribute. + * + *

Each output tensor is a dense tensor. This means that it is padded to hold the largest + * number of repeated elements seen in the input minibatch. (The shape is also padded by one to + * prevent zero-sized dimensions). The actual repeat counts for each example in the minibatch can + * be found in the {@code sizes} output. In many cases the output of {@code decode_proto} is fed + * immediately into tf.squeeze if missing values are not a concern. When using tf.squeeze, always + * pass the squeeze dimension explicitly to avoid surprises. + * + *

For the most part, the mapping between Proto field types and TensorFlow dtypes is + * straightforward. However, there are a few special cases: + * + *

  • A proto field that contains a submessage or group can only be converted to {@code + * DT_STRING} (the serialized submessage). This is to reduce the complexity of the API. The + * resulting string can be used as input to another instance of the decode_proto op. + *
  • TensorFlow lacks support for unsigned integers. The ops represent uint64 types as a + * {@code DT_INT64} with the same twos-complement bit pattern (the obvious way). Unsigned + * int32 values can be represented exactly by specifying type {@code DT_INT64}, or using + * twos-complement if the caller specifies {@code DT_INT32} in the {@code output_types} + * attribute. + *

Both binary and text proto serializations are supported, and can be chosen using the {@code + * format} attribute. + * + *

The {@code descriptor_source} attribute selects the source of protocol descriptors to + * consult when looking up {@code message_type}. This may be: + * + *

  • An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + *
  • A file, in which case protocol descriptors are created from the file, which is + * expected to contain a {@code FileDescriptorSet} serialized as a string. NOTE: You can + * build a {@code descriptor_source} file using the {@code --descriptor_set_out} and {@code + * --include_imports} options to the protocol compiler {@code protoc}. + *
  • A "bytes://&lt;bytes&gt;", in which protocol descriptors are created from + * {@code <bytes>}, which is expected to be a {@code FileDescriptorSet} serialized as a + * string. + *

* * @param bytes Tensor of serialized protos with shape {@code batch_shape}. * @param messageType Name of the proto message type to decode. - * @param fieldNames List of strings containing proto field names. An extension field can be decoded - * by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. + * @param fieldNames List of strings containing proto field names. An extension field can be + * decoded by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. * @param outputTypes List of TF types to use for the respective field in field_names. * @param options carries optional attribute values * @return a new instance of DecodeProto */ - public DecodeProto decodeProto(Operand bytes, String messageType, - List fieldNames, List> outputTypes, + public DecodeProto decodeProto( + Operand bytes, + String messageType, + List fieldNames, + List> outputTypes, DecodeProto.Options... options) { return DecodeProto.create(scope, bytes, messageType, fieldNames, outputTypes, options); } @@ -2153,55 +2189,57 @@ public DeleteSessionTensor deleteSessionTensor(Operand handle) { } /** - * Deletes the resource specified by the handle. - * All subsequent operations using the resource will result in a NotFound - * error status. + * Deletes the resource specified by the handle. All subsequent operations using the resource will + * result in a NotFound error status. * * @param resource handle to the resource to delete. * @param options carries optional attribute values * @return a new instance of DestroyResourceOp */ - public DestroyResourceOp destroyResourceOp(Operand resource, - DestroyResourceOp.Options... options) { + public DestroyResourceOp destroyResourceOp( + Operand resource, DestroyResourceOp.Options... options) { return DestroyResourceOp.create(scope, resource, options); } /** - * Destroys the temporary variable and returns its final value. - * Sets output to the value of the Tensor pointed to by 'ref', then destroys - * the temporary variable called 'var_name'. - * All other uses of 'ref' must have executed before this op. - * This is typically achieved by chaining the ref through each assign op, or by - * using control dependencies. - *
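
For orientation, a minimal sketch of driving {@code decodeProto} through the Java API documented above. The message type {@code mypkg.Example}, its fields, and the placeholder setup are hypothetical, and imports ({@code java.util.Arrays}, the {@code org.tensorflow.types} classes) are assumed:

<pre>{@code
try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  // Batch of serialized protos; descriptors for "mypkg.Example" are assumed
  // to be linked into the binary (empty or "local://" descriptor_source).
  Operand<TString> bytes = tf.placeholder(TString.class);
  DecodeProto decoded = tf.decodeProto(
      bytes,
      "mypkg.Example",
      Arrays.asList("id", "name"),
      Arrays.asList(TInt64.class, TString.class));
  // decoded.sizes() holds per-example repeat counts; each entry of
  // decoded.output() is one padded dense tensor per requested field.
}
}</pre>
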

Outputs the final value of the tensor pointed to by 'ref'. + * Destroys the temporary variable and returns its final value. Sets output to the value of the + * Tensor pointed to by 'ref', then destroys the temporary variable called 'var_name'. All other + * uses of 'ref' must have executed before this op. This is typically achieved by + * chaining the ref through each assign op, or by using control dependencies. + * + *

Outputs the final value of the tensor pointed to by 'ref'. * * @param data type for {@code value} output * @param ref A reference to the temporary variable tensor. * @param varName Name of the temporary variable, usually the name of the matching - * 'TemporaryVariable' op. + * 'TemporaryVariable' op. * @param data type for {@code DestroyTemporaryVariable} output and operands * @return a new instance of DestroyTemporaryVariable */ - public DestroyTemporaryVariable destroyTemporaryVariable(Operand ref, - String varName) { + public DestroyTemporaryVariable destroyTemporaryVariable( + Operand ref, String varName) { return DestroyTemporaryVariable.create(scope, ref, varName); } /** - * Partitions {@code data} into {@code num_partitions} tensors using indices from {@code partitions}. - * For each index tuple {@code js} of size {@code partitions.ndim}, the slice {@code data[js, ...]} - * becomes part of {@code outputs[partitions[js]]}. The slices with {@code partitions[js] = i} - * are placed in {@code outputs[i]} in lexicographic order of {@code js}, and the first - * dimension of {@code outputs[i]} is the number of entries in {@code partitions} equal to {@code i}. - * In detail, - *

+   * Partitions {@code data} into {@code num_partitions} tensors using indices from {@code
+   * partitions}. For each index tuple {@code js} of size {@code partitions.ndim}, the slice {@code
+   * data[js, ...]} becomes part of {@code outputs[partitions[js]]}. The slices with {@code
+   * partitions[js] = i} are placed in {@code outputs[i]} in lexicographic order of {@code js}, and
+   * the first dimension of {@code outputs[i]} is the number of entries in {@code partitions} equal
+   * to {@code i}. In detail,
+   *
+   * 
    *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
    *
    *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
    *  
- *

{@code data.shape} must start with {@code partitions.shape}. - *

For example: - *

+   *
+   * 

{@code data.shape} must start with {@code partitions.shape}. + * + *

For example: + * + *

    *      # Scalar partitions.
    *      partitions = 1
    *      num_partitions = 2
@@ -2216,50 +2254,58 @@ public  DestroyTemporaryVariable destroyTemporaryVariable(Op
    *      outputs[0] = [10, 20, 50]
    *      outputs[1] = [30, 40]
    *  
- *

See {@code dynamic_stitch} for an example on how to merge partitions back. - *

- * - *
+ * + *

See {@code dynamic_stitch} for an example on how to merge partitions back.

* * @param data type for {@code outputs} output * @param data the data value - * @param partitions Any shape. Indices in the range {@code [0, num_partitions)}. + * @param partitions Any shape. Indices in the range {@code [0, num_partitions)}. * @param numPartitions The number of partitions to output. * @param data type for {@code DynamicPartition} output and operands * @return a new instance of DynamicPartition */ - public DynamicPartition dynamicPartition(Operand data, - Operand partitions, Long numPartitions) { + public DynamicPartition dynamicPartition( + Operand data, Operand partitions, Long numPartitions) { return DynamicPartition.create(scope, data, partitions, numPartitions); } /** - * Interleave the values from the {@code data} tensors into a single tensor. - * Builds a merged tensor such that - *
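
A small usage sketch of {@code dynamicPartition}, reusing the vector example from the Javadoc above (graph construction boilerplate omitted; {@code tf} is assumed to be an {@code Ops} instance):

<pre>{@code
Operand<TFloat32> data = tf.constant(new float[] {10f, 20f, 30f, 40f, 50f});
Operand<TInt32> partitions = tf.constant(new int[] {0, 0, 1, 1, 0});
DynamicPartition<TFloat32> parts = tf.dynamicPartition(data, partitions, 2L);
// parts.outputs().get(0) evaluates to [10, 20, 50],
// parts.outputs().get(1) evaluates to [30, 40].
}</pre>
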
+   * Interleave the values from the {@code data} tensors into a single tensor. Builds a merged
+   * tensor such that
+   *
+   * 
    *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
    *  
- *

For example, if each {@code indices[m]} is scalar or vector, we have - *

+   *
+   * 

For example, if each {@code indices[m]} is scalar or vector, we have + * + *

    *      # Scalar indices:
    *      merged[indices[m], ...] = data[m][...]
    *
    *      # Vector indices:
    *      merged[indices[m][i], ...] = data[m][i, ...]
    *  
- *

Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, - * and the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we - * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this - * {@code constant}, the output shape is - *

+   *
+   * 

Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, and + * the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we must have + * {@code data[i].shape = indices[i].shape + constant}. In terms of this {@code constant}, the + * output shape is + * + *

    *  merged.shape = [max(indices)] + constant
    *  
- *

Values are merged in order, so if an index appears in both {@code indices[m][i]} and - * {@code indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the - * merged result. If you do not need this guarantee, ParallelDynamicStitch might - * perform better on some devices. - *

For example: - *

+   *
+   * 

Values are merged in order, so if an index appears in both {@code indices[m][i]} and {@code + * indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the merged + * result. If you do not need this guarantee, ParallelDynamicStitch might perform better on some + * devices. + * + *

For example: + * + *

    *      indices[0] = 6
    *      indices[1] = [4, 1]
    *      indices[2] = [[5, 2], [0, 3]]
@@ -2269,9 +2315,11 @@ public  DynamicPartition dynamicPartition(Operand data,
    *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
    *                [51, 52], [61, 62]]
    *  
- *

This method can be used to merge partitions created by {@code dynamic_partition} - * as illustrated on the following example: - *

+   *
+   * 

This method can be used to merge partitions created by {@code dynamic_partition} as + * illustrated on the following example: + * + *

    *      # Apply function (increments x_i) on elements for which a certain condition
    *      # apply (x_i != -1 in this example).
    *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
@@ -2285,9 +2333,9 @@ public  DynamicPartition dynamicPartition(Operand data,
    *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
    *      # unchanged.
    *  
- *
- * - *
+ * + *
* * @param data type for {@code merged} output * @param indices the indices value @@ -2295,43 +2343,54 @@ public DynamicPartition dynamicPartition(Operand data, * @param data type for {@code DynamicStitch} output and operands * @return a new instance of DynamicStitch */ - public DynamicStitch dynamicStitch(Iterable> indices, - Iterable> data) { + public DynamicStitch dynamicStitch( + Iterable> indices, Iterable> data) { return DynamicStitch.create(scope, indices, data); } /** - * Computes the (possibly normalized) Levenshtein Edit Distance. - * The inputs are variable-length sequences provided by SparseTensors - * (hypothesis_indices, hypothesis_values, hypothesis_shape) - * and - * (truth_indices, truth_values, truth_shape). - *
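
A corresponding sketch for {@code dynamicStitch}, inverting a two-way partition (assumes {@code tf} and {@code java.util.Arrays} as before):

<pre>{@code
Operand<TInt32> i0 = tf.constant(new int[] {0, 2});
Operand<TInt32> i1 = tf.constant(new int[] {1});
Operand<TFloat32> d0 = tf.constant(new float[] {10f, 30f});
Operand<TFloat32> d1 = tf.constant(new float[] {20f});
DynamicStitch<TFloat32> merged =
    tf.dynamicStitch(Arrays.asList(i0, i1), Arrays.asList(d0, d1));
// merged.merged() evaluates to [10, 20, 30].
}</pre>
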

The inputs are: - * - * @param hypothesisIndices The indices of the hypothesis list SparseTensor. - * This is an N x R int64 matrix. - * @param hypothesisValues The values of the hypothesis list SparseTensor. - * This is an N-length vector. - * @param hypothesisShape The shape of the hypothesis list SparseTensor. - * This is an R-length vector. - * @param truthIndices The indices of the truth list SparseTensor. - * This is an M x R int64 matrix. - * @param truthValues The values of the truth list SparseTensor. - * This is an M-length vector. + * Computes the (possibly normalized) Levenshtein Edit Distance. The inputs are variable-length + * sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) + * and (truth_indices, truth_values, truth_shape). + * + *

The inputs are: + * + * @param hypothesisIndices The indices of the hypothesis list SparseTensor. This is an N x R + * int64 matrix. + * @param hypothesisValues The values of the hypothesis list SparseTensor. This is an N-length + * vector. + * @param hypothesisShape The shape of the hypothesis list SparseTensor. This is an R-length + * vector. + * @param truthIndices The indices of the truth list SparseTensor. This is an M x R int64 matrix. + * @param truthValues The values of the truth list SparseTensor. This is an M-length vector. * @param truthShape truth indices, vector. * @param options carries optional attribute values * @param data type for {@code EditDistance} output and operands * @return a new instance of EditDistance */ - public EditDistance editDistance(Operand hypothesisIndices, - Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, - Operand truthValues, Operand truthShape, EditDistance.Options... options) { - return EditDistance.create(scope, hypothesisIndices, hypothesisValues, hypothesisShape, truthIndices, truthValues, truthShape, options); + public EditDistance editDistance( + Operand hypothesisIndices, + Operand hypothesisValues, + Operand hypothesisShape, + Operand truthIndices, + Operand truthValues, + Operand truthShape, + EditDistance.Options... options) { + return EditDistance.create( + scope, + hypothesisIndices, + hypothesisValues, + hypothesisShape, + truthIndices, + truthValues, + truthShape, + options); } /** * Creates a tensor with the given shape. - *
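
A hedged sketch of {@code editDistance} on two single-element sequences encoded as sparse tensors; the token ids 1 and 2 are arbitrary stand-ins:

<pre>{@code
Operand<TInt64> hypIdx = tf.constant(new long[][] {{0, 0}});
Operand<TInt64> hypVal = tf.constant(new long[] {1});
Operand<TInt64> hypShape = tf.constant(new long[] {1, 1});
Operand<TInt64> truthIdx = tf.constant(new long[][] {{0, 0}});
Operand<TInt64> truthVal = tf.constant(new long[] {2});
Operand<TInt64> truthShape = tf.constant(new long[] {1, 1});
EditDistance dist = tf.editDistance(
    hypIdx, hypVal, hypShape, truthIdx, truthVal, truthShape);
// dist.output() holds the per-sequence (normalized, by default) distance.
}</pre>
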

This operation creates a tensor of {@code shape} and {@code dtype}. + * + *

This operation creates a tensor of {@code shape} and {@code dtype}. * * @param data type for {@code output} output * @param shape 1-D. Represents the shape of the output tensor. @@ -2340,18 +2399,17 @@ public EditDistance editDistance(Operand hypothesisInd * @param data type for {@code Empty} output and operands * @return a new instance of Empty */ - public Empty empty(Operand shape, Class dtype, - Empty.Options... options) { + public Empty empty( + Operand shape, Class dtype, Empty.Options... options) { return Empty.create(scope, shape, dtype, options); } /** - * Creates and returns an empty tensor list. - * All list elements must be tensors of dtype element_dtype and shape compatible - * with element_shape. - *

handle: an empty tensor list. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * Creates and returns an empty tensor list. All list elements must be tensors of dtype + * element_dtype and shape compatible with element_shape. + * + *

handle: an empty tensor list. element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. * * @param elementShape the elementShape value * @param maxNumElements the maxNumElements value @@ -2359,14 +2417,15 @@ public Empty empty(Operand shape, Class dtype, * @param data type for {@code EmptyTensorList} output and operands * @return a new instance of EmptyTensorList */ - public EmptyTensorList emptyTensorList(Operand elementShape, - Operand maxNumElements, Class elementDtype) { + public EmptyTensorList emptyTensorList( + Operand elementShape, + Operand maxNumElements, + Class elementDtype) { return EmptyTensorList.create(scope, elementShape, maxNumElements, elementDtype); } /** - * Creates and returns an empty tensor map. - * handle: an empty tensor map + * Creates and returns an empty tensor map. handle: an empty tensor map * * @return a new instance of EmptyTensorMap */ @@ -2375,52 +2434,51 @@ public EmptyTensorMap emptyTensorMap() { } /** - * The op serializes protobuf messages provided in the input tensors. - * The types of the tensors in {@code values} must match the schema for the fields - * specified in {@code field_names}. All the tensors in {@code values} must have a common - * shape prefix, batch_shape. - *
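
As a quick illustration of {@code emptyTensorList} (element shape, capacity, and dtype chosen arbitrarily):

<pre>{@code
Operand<TInt32> elementShape = tf.constant(new int[] {2, 3});
Operand<TInt32> maxNumElements = tf.constant(16);
EmptyTensorList list =
    tf.emptyTensorList(elementShape, maxNumElements, TFloat32.class);
// list.handle() is a variant tensor; every element later pushed onto the
// list must be a 2x3 TFloat32 tensor.
}</pre>
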

The {@code sizes} tensor specifies repeat counts for each field. The repeat count - * (last dimension) of a each tensor in {@code values} must be greater than or equal - * to corresponding repeat count in {@code sizes}. - *

A {@code message_type} name must be provided to give context for the field names. - * The actual message descriptor can be looked up either in the linked-in - * descriptor pool or a filename provided by the caller using the - * {@code descriptor_source} attribute. - *

For the most part, the mapping between Proto field types and TensorFlow dtypes - * is straightforward. However, there are a few special cases: - *

  • A proto field that contains a submessage or group can only be converted - * to {@code DT_STRING} (the serialized submessage). This is to reduce the complexity - * of the API. The resulting string can be used as input to another instance of - * the decode_proto op. - *
  • TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a {@code DT_INT64} with the same twos-complement bit pattern (the obvious - * way). Unsigned int32 values can be represented exactly by specifying type - * {@code DT_INT64}, or using twos-complement if the caller specifies {@code DT_INT32} in - * the {@code output_types} attribute. - *

The {@code descriptor_source} attribute selects the source of protocol - * descriptors to consult when looking up {@code message_type}. This may be: - *

  • An empty string or "local://", in which case protocol descriptors are - * created for C++ (not Python) proto definitions linked to the binary. - *
  • A file, in which case protocol descriptors are created from the file, - * which is expected to contain a {@code FileDescriptorSet} serialized as a string. - * NOTE: You can build a {@code descriptor_source} file using the {@code --descriptor_set_out} - * and {@code --include_imports} options to the protocol compiler {@code protoc}. - *
  • A "bytes://&lt;bytes&gt;", in which protocol descriptors are created from {@code <bytes>}, - * which is expected to be a {@code FileDescriptorSet} serialized as a string. - *
+ * The op serializes protobuf messages provided in the input tensors. The types of the tensors in + * {@code values} must match the schema for the fields specified in {@code field_names}. All the + * tensors in {@code values} must have a common shape prefix, batch_shape. + * + *

The {@code sizes} tensor specifies repeat counts for each field. The repeat count (last + * dimension) of each tensor in {@code values} must be greater than or equal to the corresponding + * repeat count in {@code sizes}. + * + *

A {@code message_type} name must be provided to give context for the field names. The actual + * message descriptor can be looked up either in the linked-in descriptor pool or a filename + * provided by the caller using the {@code descriptor_source} attribute. + * + *

For the most part, the mapping between Proto field types and TensorFlow dtypes is + * straightforward. However, there are a few special cases: + * + *

  • A proto field that contains a submessage or group can only be converted to {@code + * DT_STRING} (the serialized submessage). This is to reduce the complexity of the API. The + * resulting string can be used as input to another instance of the decode_proto op. + *
  • TensorFlow lacks support for unsigned integers. The ops represent uint64 types as a + * {@code DT_INT64} with the same twos-complement bit pattern (the obvious way). Unsigned + * int32 values can be represented exactly by specifying type {@code DT_INT64}, or using + * twos-complement if the caller specifies {@code DT_INT32} in the {@code output_types} + * attribute. + *

The {@code descriptor_source} attribute selects the source of protocol descriptors to + * consult when looking up {@code message_type}. This may be: + * + *

  • An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + *
  • A file, in which case protocol descriptors are created from the file, which is + * expected to contain a {@code FileDescriptorSet} serialized as a string. NOTE: You can + * build a {@code descriptor_source} file using the {@code --descriptor_set_out} and {@code + * --include_imports} options to the protocol compiler {@code protoc}. + *
  • A "bytes://&lt;bytes&gt;", in which protocol descriptors are created from + * {@code <bytes>}, which is expected to be a {@code FileDescriptorSet} serialized as a + * string. + *

* * @param sizes Tensor of int32 with shape {@code [batch_shape, len(field_names)]}. * @param values List of tensors containing values for the corresponding field. @@ -2429,15 +2487,18 @@ public EmptyTensorMap emptyTensorMap() { * @param options carries optional attribute values * @return a new instance of EncodeProto */ - public EncodeProto encodeProto(Operand sizes, Iterable> values, - List fieldNames, String messageType, EncodeProto.Options... options) { + public EncodeProto encodeProto( + Operand sizes, + Iterable> values, + List fieldNames, + String messageType, + EncodeProto.Options... options) { return EncodeProto.create(scope, sizes, values, fieldNames, messageType, options); } /** - * Ensures that the tensor's shape matches the expected shape. - * Raises an error if the input tensor's shape does not match the specified shape. - * Returns the input tensor otherwise. + * Ensures that the tensor's shape matches the expected shape. Raises an error if the input + * tensor's shape does not match the specified shape. Returns the input tensor otherwise. * * @param data type for {@code output} output * @param input A tensor, whose shape is to be validated. @@ -2450,16 +2511,19 @@ public EnsureShape ensureShape(Operand input, Shape shap } /** - * Inserts a dimension of 1 into a tensor's shape. - * Given a tensor {@code input}, this operation inserts a dimension of 1 at the - * dimension index {@code axis} of {@code input}'s shape. The dimension index {@code axis} starts at - * zero; if you specify a negative number for {@code axis} it is counted backward from - * the end. - *
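
A rough {@code encodeProto} counterpart to the decode sketch earlier (again, {@code mypkg.Example} and its {@code id} field are hypothetical):

<pre>{@code
// Batch of 2 examples, one "id" value each: sizes has shape
// [batch, len(field_names)] and ids has shape [batch, max_repeat].
Operand<TInt32> sizes = tf.constant(new int[][] {{1}, {1}});
Operand<TInt64> ids = tf.constant(new long[][] {{7}, {8}});
EncodeProto encoded = tf.encodeProto(
    sizes, Arrays.<Operand<?>>asList(ids), Arrays.asList("id"), "mypkg.Example");
// encoded.bytes() holds one serialized proto per batch element.
}</pre>
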

This operation is useful if you want to add a batch dimension to a single - * element. For example, if you have a single image of shape {@code [height, width, channels]}, you can make it a batch of 1 image with {@code expand_dims(image, 0)}, - * which will make the shape {@code [1, height, width, channels]}. - *

Other examples: - *

+   * Inserts a dimension of 1 into a tensor's shape. Given a tensor {@code input}, this operation
+   * inserts a dimension of 1 at the dimension index {@code axis} of {@code input}'s shape. The
+   * dimension index {@code axis} starts at zero; if you specify a negative number for {@code axis}
+   * it is counted backward from the end.
+   *
+   * 

This operation is useful if you want to add a batch dimension to a single element. For + * example, if you have a single image of shape {@code [height, width, channels]}, you can make it + * a batch of 1 image with {@code expand_dims(image, 0)}, which will make the shape {@code [1, + * height, width, channels]}. + * + *

Other examples: + * + *

    *  # 't' is a tensor of shape [2]
    *  shape(expand_dims(t, 0)) ==> [1, 2]
    *  shape(expand_dims(t, 1)) ==> [2, 1]
@@ -2470,72 +2534,79 @@ public  EnsureShape ensureShape(Operand input, Shape shap
    *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
    *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
    *  
- *

This operation requires that: - *

{@code -1-input.dims() <= dim <= input.dims()} - *

This operation is related to {@code squeeze()}, which removes dimensions of - * size 1. + * + *

This operation requires that: + * + *

{@code -1-input.dims() <= dim <= input.dims()} + * + *

This operation is related to {@code squeeze()}, which removes dimensions of size 1. * * @param data type for {@code output} output * @param input the input value - * @param axis 0-D (scalar). Specifies the dimension index at which to - * expand the shape of {@code input}. Must be in the range - * {@code [-rank(input) - 1, rank(input)]}. + * @param axis 0-D (scalar). Specifies the dimension index at which to expand the shape of {@code + * input}. Must be in the range {@code [-rank(input) - 1, rank(input)]}. * @param data type for {@code ExpandDims} output and operands * @return a new instance of ExpandDims */ - public ExpandDims expandDims(Operand input, - Operand axis) { + public ExpandDims expandDims( + Operand input, Operand axis) { return ExpandDims.create(scope, input, axis); } /** - * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output dimension. 3D extension of {@code extract_image_patches}. + * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output + * dimension. 3D extension of {@code extract_image_patches}. * * @param data type for {@code patches} output * @param input 5-D Tensor with shape {@code [batch, in_planes, in_rows, in_cols, depth]}. * @param ksizes The size of the sliding window for each dimension of {@code input}. - * @param strides 1-D of length 5. How far the centers of two consecutive patches are in - * {@code input}. Must be: {@code [1, stride_planes, stride_rows, stride_cols, 1]}. + * @param strides 1-D of length 5. How far the centers of two consecutive patches are in {@code + * input}. Must be: {@code [1, stride_planes, stride_rows, stride_cols, 1]}. * @param padding The type of padding algorithm to use. - *

The size-related attributes are specified as follows: - *

+   *     

The size-related attributes are specified as follows: + *

    *  ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
    *  strides = [1, stride_planes, strides_rows, strides_cols, 1]
    *  
+ * * @param data type for {@code ExtractVolumePatches} output and operands * @return a new instance of ExtractVolumePatches */ - public ExtractVolumePatches extractVolumePatches(Operand input, - List ksizes, List strides, String padding) { + public ExtractVolumePatches extractVolumePatches( + Operand input, List ksizes, List strides, String padding) { return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); } /** - * Creates a tensor filled with a scalar value. - * This operation creates a tensor of shape {@code dims} and fills it with {@code value}. - *

For example: - *

+   * Creates a tensor filled with a scalar value. This operation creates a tensor of shape {@code
+   * dims} and fills it with {@code value}.
+   *
+   * 

For example: + * + *

    *  # Output tensor has shape [2, 3].
    *  fill([2, 3], 9) ==> [[9, 9, 9]
    *                       [9, 9, 9]]
    *  
- *

{@code tf.fill} differs from {@code tf.constant} in a few ways: - *

  • {@code tf.fill} only supports scalar contents, whereas {@code tf.constant} supports - * Tensor values.
  • {@code tf.fill} creates an Op in the computation graph that constructs the actual - * Tensor value at runtime. This is in contrast to {@code tf.constant} which embeds - * the entire Tensor into the graph with a {@code Const} node.
  • Because {@code tf.fill} evaluates at graph runtime, it supports dynamic shapes - * based on other runtime Tensors, unlike {@code tf.constant}.
+ * + *

{@code tf.fill} differs from {@code tf.constant} in a few ways: + * + *

  • {@code tf.fill} only supports scalar contents, whereas {@code tf.constant} supports + * Tensor values. + *
  • {@code tf.fill} creates an Op in the computation graph that constructs the actual Tensor + * value at runtime. This is in contrast to {@code tf.constant} which embeds the entire + * Tensor into the graph with a {@code Const} node. + *
  • Because {@code tf.fill} evaluates at graph runtime, it supports dynamic shapes based on + * other runtime Tensors, unlike {@code tf.constant}. + *
* * @param data type for {@code output} output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - *

{@literal @}compatibility(numpy)
- * Equivalent to np.full - *
{@literal @}end_compatibility + *

{@literal @}compatibility(numpy)
+ * Equivalent to np.full
+ * {@literal @}end_compatibility * @param data type for {@code Fill} output and operands * @return a new instance of Fill */ @@ -2544,34 +2615,37 @@ public Fill fill(Operand dims, OperandFingerprint op considers the first dimension of {@code data} as the batch dimension, - * and {@code output[i]} contains the fingerprint value generated from contents in - * {@code data[i, ...]} for all {@code i}. - *
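
A minimal {@code fill} sketch matching the [2, 3] example above ({@code tf} is assumed to be an {@code Ops} instance):

<pre>{@code
Operand<TInt32> dims = tf.constant(new int[] {2, 3});
Fill<TInt32> nines = tf.fill(dims, tf.constant(9));
// nines evaluates to [[9, 9, 9], [9, 9, 9]]; because the value is built at
// graph run time, dims could equally come from another runtime tensor.
}</pre>
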

Fingerprint op writes fingerprint values as byte arrays. For example, the - * default method {@code farmhash64} generates a 64-bit fingerprint value at a time. - * This 8-byte value is written out as an {@code uint8} array of size 8, in little-endian - * order. - *

For example, suppose that {@code data} has data type {@code DT_INT32} and shape (2, 3, 4), - * and that the fingerprint method is {@code farmhash64}. In this case, the output shape - * is (2, 8), where 2 is the batch dimension size of {@code data}, and 8 is the size of - * each fingerprint value in bytes. {@code output[0, :]} is generated from 12 integers in - * {@code data[0, :, :]} and similarly {@code output[1, :]} is generated from other 12 integers - * in {@code data[1, :, :]}. - *

Note that this op fingerprints the raw underlying buffer, and it does not - * fingerprint Tensor's metadata such as data type and/or shape. For example, the - * fingerprint values are invariant under reshapes and bitcasts as long as the - * batch dimension remain the same: - *

+   * Generates fingerprint values. Generates fingerprint values of {@code data}.
+   *
+   * 

Fingerprint op considers the first dimension of {@code data} as the batch dimension, and + * {@code output[i]} contains the fingerprint value generated from contents in {@code data[i, + * ...]} for all {@code i}. + * + *

Fingerprint op writes fingerprint values as byte arrays. For example, the default method + * {@code farmhash64} generates a 64-bit fingerprint value at a time. This 8-byte value is written + * out as an {@code uint8} array of size 8, in little-endian order. + * + *

For example, suppose that {@code data} has data type {@code DT_INT32} and shape (2, 3, 4), + * and that the fingerprint method is {@code farmhash64}. In this case, the output shape is (2, + * 8), where 2 is the batch dimension size of {@code data}, and 8 is the size of each fingerprint + * value in bytes. {@code output[0, :]} is generated from 12 integers in {@code data[0, :, :]} and + * similarly {@code output[1, :]} is generated from other 12 integers in {@code data[1, :, :]}. + * + *

Note that this op fingerprints the raw underlying buffer, and it does not fingerprint + * Tensor's metadata such as data type and/or shape. For example, the fingerprint values are + * invariant under reshapes and bitcasts as long as the batch dimension remains the same: + *

    *  Fingerprint(data) == Fingerprint(Reshape(data, ...))
    *  Fingerprint(data) == Fingerprint(Bitcast(data, ...))
    *  
- *

For string data, one should expect {@code Fingerprint(data) != Fingerprint(ReduceJoin(data))} in general. + * + *

For string data, one should expect {@code Fingerprint(data) != + * Fingerprint(ReduceJoin(data))} in general. * * @param data Must have rank 1 or higher. - * @param method Fingerprint method used by this op. Currently available method is - * {@code farmhash::fingerprint64}. + * @param method Fingerprint method used by this op. Currently available method is {@code + * farmhash::fingerprint64}. * @return a new instance of Fingerprint */ public Fingerprint fingerprint(Operand data, Operand method) { @@ -2579,6 +2653,8 @@ public Fingerprint fingerprint(Operand data, Operand m } /** + * + * *

    *   output = input;
    *   for i in range(start, limit, delta)
@@ -2589,22 +2665,30 @@ public Fingerprint fingerprint(Operand data, Operand m
    * @param limit The upper bound. An int32
    * @param delta The increment. An int32
    * @param input A list of input tensors whose types are T.
-   * @param body 
+   * @param body
+   *     
    *  A function that takes a list of tensors (int32, T) and returns another
    *  list of tensors (T).
    *  
+ * * @return a new instance of For */ - public For forOp(Operand start, Operand limit, Operand delta, - Iterable> input, ConcreteFunction body) { + public For forOp( + Operand start, + Operand limit, + Operand delta, + Iterable> input, + ConcreteFunction body) { return For.create(scope, start, limit, delta, input, body); } /** - * Gather slices from {@code params} axis {@code axis} according to {@code indices}. - * {@code indices} must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape {@code params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1:]} where: - *
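
A hedged sketch of wiring up {@code forOp}; constructing the loop-body {@code ConcreteFunction} is deliberately elided, since it depends on how that function is defined elsewhere:

<pre>{@code
// body must map (int32 i, float state) -> (float state).
For runLoop(Ops tf, ConcreteFunction body) {
  return tf.forOp(
      tf.constant(0),   // start
      tf.constant(10),  // limit
      tf.constant(1),   // delta
      Arrays.<Operand<?>>asList(tf.constant(0.0f)),  // initial loop state
      body);
}
}</pre>
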
+   * Gather slices from {@code params} axis {@code axis} according to {@code indices}. {@code
+   * indices} must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output
+   * tensor with shape {@code params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis +
+   * 1:]} where:
+   *
+   * 
    *      # Scalar indices (output is rank(params) - 1).
    *      output[a_0, ..., a_n, b_0, ..., b_n] =
    *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
@@ -2617,70 +2701,83 @@ public For forOp(Operand start, Operand limit, Operand d
    *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
    *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
    *  
- *
- * - *
- *

Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. - *

See also {@code tf.batch_gather} and {@code tf.gather_nd}. + * + *

+ * + *

Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out + * of bound index is found, a 0 is stored in the corresponding output value. + * + *

See also {@code tf.batch_gather} and {@code tf.gather_nd}. * * @param data type for {@code output} output - * @param params The tensor from which to gather values. Must be at least rank - * {@code axis + 1}. + * @param params The tensor from which to gather values. Must be at least rank {@code axis + 1}. * @param indices Index tensor. Must be in range {@code [0, params.shape[axis])}. * @param axis The axis in {@code params} to gather {@code indices} from. Defaults to the first - * dimension. Supports negative indexes. + * dimension. Supports negative indexes. * @param options carries optional attribute values * @param data type for {@code GatherV2} output and operands * @return a new instance of Gather */ - public Gather gather(Operand params, Operand indices, - Operand axis, Gather.Options... options) { + public Gather gather( + Operand params, + Operand indices, + Operand axis, + Gather.Options... options) { return Gather.create(scope, params, indices, axis, options); } /** - * Gather slices from {@code params} into a Tensor with shape specified by {@code indices}. - * {@code indices} is a K-dimensional integer tensor, best thought of as a - * (K-1)-dimensional tensor of indices into {@code params}, where each element defines a - * slice of {@code params}: - *
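
A small {@code gather} sketch along axis 0:

<pre>{@code
Operand<TFloat32> params = tf.constant(new float[] {10f, 11f, 12f, 13f});
Operand<TInt32> indices = tf.constant(new int[] {3, 0});
Gather<TFloat32> picked = tf.gather(params, indices, tf.constant(0));
// picked evaluates to [13, 10].
}</pre>
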

+   * Gather slices from {@code params} into a Tensor with shape specified by {@code indices}. {@code
+   * indices} is a K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of
+   * indices into {@code params}, where each element defines a slice of {@code params}:
+   *
+   * 
    *  output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
    *  
- *

Whereas in {@code tf.gather} {@code indices} defines slices into the {@code axis} - * dimension of {@code params}, in {@code tf.gather_nd}, {@code indices} defines slices into the - * first {@code N} dimensions of {@code params}, where {@code N = indices.shape[-1]}. - *

The last dimension of {@code indices} can be at most the rank of - * {@code params}: - *

+   *
+   * 

Whereas in {@code tf.gather} {@code indices} defines slices into the {@code axis} dimension + * of {@code params}, in {@code tf.gather_nd}, {@code indices} defines slices into the first + * {@code N} dimensions of {@code params}, where {@code N = indices.shape[-1]}. + * + *

The last dimension of {@code indices} can be at most the rank of {@code params}: + * + *

    *  indices.shape[-1] <= params.rank
    *  
- *

The last dimension of {@code indices} corresponds to elements - * (if {@code indices.shape[-1] == params.rank}) or slices - * (if {@code indices.shape[-1] < params.rank}) along dimension {@code indices.shape[-1]} - * of {@code params}. The output tensor has shape - *

+   *
+   * 

The last dimension of {@code indices} corresponds to elements (if {@code indices.shape[-1] + * == params.rank}) or slices (if {@code indices.shape[-1] < params.rank}) along dimension {@code + * indices.shape[-1]} of {@code params}. The output tensor has shape + * + *

    *  indices.shape[:-1] + params.shape[indices.shape[-1]:]
    *  
- *

Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. - *

Some examples below. - *

Simple indexing into a matrix: - *

+   *
+   * 

Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out + * of bound index is found, a 0 is stored in the corresponding output value. + * + *

Some examples below. + * + *

Simple indexing into a matrix: + * + *

    *      indices = [[0, 0], [1, 1]]
    *      params = [['a', 'b'], ['c', 'd']]
    *      output = ['a', 'd']
    *  
- *

Slice indexing into a matrix: - *

+   *
+   * 

Slice indexing into a matrix: + * + *

    *      indices = [[1], [0]]
    *      params = [['a', 'b'], ['c', 'd']]
    *      output = [['c', 'd'], ['a', 'b']]
    *  
- *

Indexing into a 3-tensor: - *

+   *
+   * 

Indexing into a 3-tensor: + * + *

    *      indices = [[1]]
    *      params = [[['a0', 'b0'], ['c0', 'd0']],
    *                [['a1', 'b1'], ['c1', 'd1']]]
@@ -2698,20 +2795,26 @@ public  Gather gather(Operand params, Operand
-   *  

Batched indexing into a matrix: - *

+   *
+   * 

Batched indexing into a matrix: + * + *

    *      indices = [[[0, 0]], [[0, 1]]]
    *      params = [['a', 'b'], ['c', 'd']]
    *      output = [['a'], ['b']]
    *  
- *

Batched slice indexing into a matrix: - *

+   *
+   * 

Batched slice indexing into a matrix: + * + *

    *      indices = [[[1]], [[0]]]
    *      params = [['a', 'b'], ['c', 'd']]
    *      output = [[['c', 'd']], [['a', 'b']]]
    *  
- *

Batched indexing into a 3-tensor: - *

+   *
+   * 

Batched indexing into a 3-tensor: + * + *

    *      indices = [[[1]], [[0]]]
    *      params = [[['a0', 'b0'], ['c0', 'd0']],
    *                [['a1', 'b1'], ['c1', 'd1']]]
@@ -2730,7 +2833,8 @@ public  Gather gather(Operand params, Operand
-   *  

See also {@code tf.gather} and {@code tf.batch_gather}. + * + *

See also {@code tf.gather} and {@code tf.batch_gather}. * * @param data type for {@code output} output * @param params The tensor from which to gather values. @@ -2738,8 +2842,8 @@ public Gather gather(Operand params, Operand data type for {@code GatherNd} output and operands * @return a new instance of GatherNd */ - public GatherNd gatherNd(Operand params, - Operand indices) { + public GatherNd gatherNd( + Operand params, Operand indices) { return GatherNd.create(scope, params, indices); } @@ -2762,8 +2866,8 @@ public GetSessionHandle getSessionHandle(Operand value) { * @param data type for {@code GetSessionTensor} output and operands * @return a new instance of GetSessionTensor */ - public GetSessionTensor getSessionTensor(Operand handle, - Class dtype) { + public GetSessionTensor getSessionTensor( + Operand handle, Class dtype) { return GetSessionTensor.create(scope, handle, dtype); } @@ -2776,30 +2880,35 @@ public GetSessionTensor getSessionTensor(Operand h * @return a new instance of {@code Gradients} * @throws IllegalArgumentException if execution environment is not a graph */ - public Gradients gradients(Iterable> y, Iterable> x, + public Gradients gradients( + Iterable> y, + Iterable> x, Gradients.Options... options) { return Gradients.create(scope, y, x, options); } /** - * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, - * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} - *
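A matching sketch for gatherNd's "simple indexing" case above, illustrative only (same assumed {@code Ops tf} and imports):

    // Each row of `indices` picks one element of the [2, 2] params matrix
    Operand<TInt32> params = tf.constant(new int[][] {{10, 11}, {12, 13}});
    Operand<TInt32> indices = tf.constant(new int[][] {{0, 0}, {1, 1}});
    GatherNd<TInt32> out = tf.gatherNd(params, indices);
    // out is the vector [10, 13]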

- * If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives of some loss - * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of {@code y}. - *

- * If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for all - * shapes in {@code y}. - *

- * The partial derivatives are returned in output {@code dy}, with the size of {@code x}. - *

- * Example of usage: - *

{@code
-   *  Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
-   *  Constant alpha = tf.constant(1.0f);
-   *  tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
-   *  tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
-   *  }
+ * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e., + * {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} + * + *

If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives + * of some loss function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of + * {@code y}. + * + *

If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for + * all shapes in {@code y}. + * + *

The partial derivatives are returned in output {@code dy}, with the size of {@code x}. + * + *

Example of usage: + * + *

{@code
+   * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
+   * Constant<TFloat32> alpha = tf.constant(1.0f);
+   * tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
+   * tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
+   *
+   * }
* * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed @@ -2807,17 +2916,18 @@ public Gradients gradients(Iterable> y, Iterable y, Iterable> x, - Gradients.Options... options) { + public Gradients gradients( + Operand y, Iterable> x, Gradients.Options... options) { return Gradients.create(scope, y, x, options); } /** - * Gives a guarantee to the TF runtime that the input tensor is a constant. - * The runtime is then free to make optimizations based on this. - *

Only accepts value typed tensors as inputs and rejects resource variable handles - * as input. - *

Returns the input tensor without modification. + * Gives a guarantee to the TF runtime that the input tensor is a constant. The runtime is then + * free to make optimizations based on this. + * + *

Only accepts value typed tensors as inputs and rejects resource variable handles as input. + * + *

Returns the input tensor without modification. * * @param data type for {@code output} output * @param input the input value @@ -2829,10 +2939,9 @@ public GuaranteeConst guaranteeConst(Operand input) { } /** - * Creates a non-initialized hash table. - * This op creates a hash table, specifying the type of its keys and values. - * Before using the table you will have to initialize it. After initialization the - * table will be immutable. + * Creates a non-initialized hash table. This op creates a hash table, specifying the type of its + * keys and values. Before using the table you will have to initialize it. After initialization + * the table will be immutable. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. @@ -2841,17 +2950,17 @@ public GuaranteeConst guaranteeConst(Operand input) { * @param data type for {@code HashTableV2} output and operands * @return a new instance of HashTable */ - public HashTable hashTable(Class keyDtype, - Class valueDtype, HashTable.Options... options) { + public HashTable hashTable( + Class keyDtype, Class valueDtype, HashTable.Options... options) { return HashTable.create(scope, keyDtype, valueDtype, options); } /** - * Return histogram of values. - * Given the tensor {@code values}, this operation returns a rank 1 histogram counting - * the number of entries in {@code values} that fall into every bin. The bins are - * equal width and determined by the arguments {@code value_range} and {@code nbins}. - *

+   * Return histogram of values. Given the tensor {@code values}, this operation returns a rank 1
+   * histogram counting the number of entries in {@code values} that fall into every bin. The bins
+   * are equal width and determined by the arguments {@code value_range} and {@code nbins}.
+   *
+   * 
    *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    *  nbins = 5
    *  value_range = [0.0, 5.0]
@@ -2865,24 +2974,24 @@ public  HashTable hashTable(Class keyDtype,
    *
    * @param  data type for {@code out} output
    * @param values Numeric {@code Tensor}.
-   * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}.
-   *  values <= value_range[0] will be mapped to hist[0],
-   *  values >= value_range[1] will be mapped to hist[-1].
-   * @param nbins Scalar {@code int32 Tensor}.  Number of histogram bins.
+   * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. values
+   *     <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped
+   *     to hist[-1].
+   * @param nbins Scalar {@code int32 Tensor}. Number of histogram bins.
    * @param  data type for {@code HistogramFixedWidth} output and operands
    * @return a new instance of HistogramFixedWidth, with default output types
    */
-  public <T extends TNumber> HistogramFixedWidth<TInt32> histogramFixedWidth(Operand<T> values,
-      Operand<T> valueRange, Operand<TInt32> nbins) {
+  public <T extends TNumber> HistogramFixedWidth<TInt32> histogramFixedWidth(
+      Operand<T> values, Operand<T> valueRange, Operand<TInt32> nbins) {
     return HistogramFixedWidth.create(scope, values, valueRange, nbins);
   }
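A sketch matching the bin layout documented above, illustrative only (assumes an in-scope {@code Ops tf}):

    Operand<TFloat32> values = tf.constant(new float[] {-1f, 0f, 1.5f, 2f, 5f, 15f});
    Operand<TFloat32> valueRange = tf.constant(new float[] {0f, 5f});
    HistogramFixedWidth<TInt32> hist =
        tf.histogramFixedWidth(values, valueRange, tf.constant(5));
    // Bins (-inf,1), [1,2), [2,3), [3,4), [4,inf) receive counts [2, 1, 1, 0, 2]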
 
   /**
-   * Return histogram of values.
-   *  Given the tensor {@code values}, this operation returns a rank 1 histogram counting
-   *  the number of entries in {@code values} that fall into every bin.  The bins are
-   *  equal width and determined by the arguments {@code value_range} and {@code nbins}.
-   *  
+   * Return histogram of values. Given the tensor {@code values}, this operation returns a rank 1
+   * histogram counting the number of entries in {@code values} that fall into every bin. The bins
+   * are equal width and determined by the arguments {@code value_range} and {@code nbins}.
+   *
+   * 
    *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    *  nbins = 5
    *  value_range = [0.0, 5.0]
@@ -2896,10 +3005,10 @@ public  HistogramFixedWidth histogramFixedWidth(Opera
    *
    * @param  data type for {@code out} output
    * @param values Numeric {@code Tensor}.
-   * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}.
-   *  values <= value_range[0] will be mapped to hist[0],
-   *  values >= value_range[1] will be mapped to hist[-1].
-   * @param nbins Scalar {@code int32 Tensor}.  Number of histogram bins.
+   * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. values
+   *     <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped
+   *     to hist[-1].
+   * @param nbins Scalar {@code int32 Tensor}. Number of histogram bins.
    * @param dtype the value of the dtype property
    * @param  data type for {@code HistogramFixedWidth} output and operands
    * @param  data type for {@code HistogramFixedWidth} output and operands
@@ -2923,12 +3032,12 @@ public  Identity identity(Operand input) {
   }
 
   /**
-   * Returns a list of tensors with the same shapes and contents as the input
-   *  tensors.
-   *  

This op can be used to override the gradient for complicated functions. For - * example, suppose y = f(x) and we wish to apply a custom function g for backprop - * such that dx = g(dy). In Python, - *

+   * Returns a list of tensors with the same shapes and contents as the input tensors.
+   *
+   * 

This op can be used to override the gradient for complicated functions. For example, suppose + * y = f(x) and we wish to apply a custom function g for backprop such that dx = g(dy). In Python, + * + *

    *  with tf.get_default_graph().gradient_override_map(
    *      {'IdentityN': 'OverrideGradientWithG'}):
    *    y, _ = identity_n([f(x), x])
@@ -2948,9 +3057,11 @@ public IdentityN identityN(Iterable> input) {
   /**
    * output = cond ? then_branch(input) : else_branch(input)
    *
-   *  

Selects between {@link StatefulIf} and {@link StatelessIf} based on the statefulness of the function arguments. + *

Selects between {@link StatefulIf} and {@link StatelessIf} based on the statefulness of the + * function arguments. * - * @param cond

+   * @param cond
+   *     
    *    A Tensor. If the tensor is a scalar of non-boolean type, the
    *    scalar is converted to a boolean according to the
    *    following rule: if the scalar is a numerical value, non-zero means
@@ -2958,39 +3069,48 @@ public IdentityN identityN(Iterable> input) {
    *    means `True` and empty means `False`. If the tensor is not a scalar,
    *    being empty means False and being non-empty means True.
    *  
+ * * @param input A list of input tensors. * @param Tout A list of output types. - * @param thenBranch
+   * @param thenBranch
+   *     
    *    A function that takes 'inputs' and returns a list of tensors, whose
    *    types are the same as what else_branch returns.
    *  
- * @param elseBranch
+   *
+   * @param elseBranch
+   *     
    *  A function that takes 'inputs' and returns a list of tensors, whose
    *  types are the same as what then_branch returns.
    *  
+ * * @param options carries optional attribute values * @return a new instance of If */ - public If ifOp(Operand cond, Iterable> input, - List> Tout, ConcreteFunction thenBranch, ConcreteFunction elseBranch, + public If ifOp( + Operand cond, + Iterable> input, + List> Tout, + ConcreteFunction thenBranch, + ConcreteFunction elseBranch, If.Options... options) { return If.create(scope, cond, input, Tout, thenBranch, elseBranch, options); } /** - * Returns immutable tensor from memory region. - * The current implementation memmaps the tensor from a file. + * Returns immutable tensor from memory region. The current implementation memmaps the tensor from + * a file. * * @param data type for {@code tensor} output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see - * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. * @param data type for {@code ImmutableConst} output and operands * @return a new instance of ImmutableConst */ - public ImmutableConst immutableConst(Class dtype, Shape shape, - String memoryRegionName) { + public ImmutableConst immutableConst( + Class dtype, Shape shape, String memoryRegionName) { return ImmutableConst.create(scope, dtype, shape, memoryRegionName); } @@ -3002,49 +3122,56 @@ public ImmutableConst immutableConst(Class dtype, Shape * @param values Values of type Tval. * @return a new instance of InitializeTable */ - public InitializeTable initializeTable(Operand tableHandle, - Operand keys, Operand values) { + public InitializeTable initializeTable( + Operand tableHandle, + Operand keys, + Operand values) { return InitializeTable.create(scope, tableHandle, keys, values); } /** - * Initializes a table from a text file. - * It inserts one key-value pair into the table for each line of the file. - * The key and value is extracted from the whole line content, elements from the - * split line based on {@code delimiter} or the line number (starting from zero). - * Where to extract the key and value from a line is specified by {@code key_index} and - * {@code value_index}. - *
    - *
  • A value of -1 means use the line number(starting from zero), expects {@code int64}.
  • - *
  • A value of -2 means use the whole line content, expects {@code string}.
  • - *
  • A value >= 0 means use the index (starting at zero) of the split line based - * on {@code delimiter}.
  • - *
+ * Initializes a table from a text file. It inserts one key-value pair into the table for each + * line of the file. The key and value are extracted from the whole line content, elements from the + * split line based on {@code delimiter}, or the line number (starting from zero). Where to extract + * the key and value from a line is specified by {@code key_index} and {@code value_index}. + * + *
    + *
  • A value of -1 means use the line number (starting from zero), expects {@code int64}. + *
  • A value of -2 means use the whole line content, expects {@code string}. + *
  • A value >= 0 means use the index (starting at zero) of the split line based on {@code + * delimiter}. + *
* * @param tableHandle Handle to a table which will be initialized. * @param filename Filename of a vocabulary text file. * @param keyIndex Column index in a line to get the table {@code key} values from. - * @param valueIndex Column index that represents information of a line to get the table - * {@code value} values from. + * @param valueIndex Column index that represents information of a line to get the table {@code + * value} values from. * @param options carries optional attribute values * @return a new instance of InitializeTableFromTextFile */ public InitializeTableFromTextFile initializeTableFromTextFile( - Operand tableHandle, Operand filename, Long keyIndex, - Long valueIndex, InitializeTableFromTextFile.Options... options) { - return InitializeTableFromTextFile.create(scope, tableHandle, filename, keyIndex, valueIndex, options); + Operand tableHandle, + Operand filename, + Long keyIndex, + Long valueIndex, + InitializeTableFromTextFile.Options... options) { + return InitializeTableFromTextFile.create( + scope, tableHandle, filename, keyIndex, valueIndex, options); } /** * Adds v into specified rows of x. - *
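A hedged sketch of the text-file initialization above, illustrative only ({@code vocab.txt} is a hypothetical file, and in graph mode the returned init op still has to be run in a session before any lookup; the raw HashTable type is used because the generic parameters are elided in this diff):

    // Key = whole line (-2, string), value = line number (-1, int64)
    HashTable table = tf.hashTable(TString.class, TInt64.class);
    tf.initializeTableFromTextFile(table, tf.constant("vocab.txt"), -2L, -1L);
    // Assumes the HashTable op can be passed directly as the table handle operand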
+   *
+   * 
    *  Computes y = x; y[i, :] += v; return y.
    *  
* * @param data type for {@code y} output * @param x A {@code Tensor} of type T. * @param i A vector. Indices into the left-most dimension of {@code x}. - * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, + * which must be the same as i's size. * @param data type for {@code InplaceAdd} output and operands * @return a new instance of InplaceAdd */ @@ -3053,6 +3180,8 @@ public InplaceAdd inplaceAdd(Operand x, Operand } /** + * + * *
    *  Subtracts `v` into specified rows of `x`.
    *
@@ -3062,7 +3191,8 @@ public  InplaceAdd inplaceAdd(Operand x, Operand
    * @param  data type for {@code y} output
    * @param x A {@code Tensor} of type T.
    * @param i A vector. Indices into the left-most dimension of {@code x}.
-   * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+   * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension,
+   *     which must be the same as i's size.
    * @param  data type for {@code InplaceSub} output and operands
    * @return a new instance of InplaceSub
    */
@@ -3071,26 +3201,27 @@ public  InplaceSub inplaceSub(Operand x, Operand
   }
 
   /**
-   * Updates specified rows 'i' with values 'v'.
-   *  Computes {@code x[i, :] = v; return x}.
-   *  

Originally this function is mutative however for compilation we make this - * operation create / operate on a copy of {@code x}. + * Updates specified rows 'i' with values 'v'. Computes {@code x[i, :] = v; return x}. + * + *

Originally this function is mutative however for compilation we make this operation create / + * operate on a copy of {@code x}. * * @param data type for {@code y} output * @param x A tensor of type {@code T}. * @param i A vector. Indices into the left-most dimension of {@code x}. - * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, + * which must be the same as i's size. * @param data type for {@code InplaceUpdate} output and operands * @return a new instance of InplaceUpdate */ - public InplaceUpdate inplaceUpdate(Operand x, Operand i, - Operand v) { + public InplaceUpdate inplaceUpdate( + Operand x, Operand i, Operand v) { return InplaceUpdate.create(scope, x, i, v); } /** - * Checks whether a tensor has been initialized. - * Outputs boolean scalar indicating whether the tensor has been initialized. + * Checks whether a tensor has been initialized. Outputs boolean scalar indicating whether the + * tensor has been initialized. * * @param ref Should be from a {@code Variable} node. May be uninitialized. * @return a new instance of IsVariableInitialized @@ -3100,21 +3231,17 @@ public IsVariableInitialized isVariableInitialized(Operand ref) } /** - * Computes the Kth order statistic of a data set. The current - * implementation uses a binary search requiring exactly 32 passes over - * the input data. The running time is linear with respect to input - * size. The median-of-medians algorithm is probably faster, but is - * difficult to implement efficiently in XLA. The implementation imposes - * a total ordering on floats. The ordering is consistent with the usual - * partial order. Positive NaNs are greater than positive - * infinity. Negative NaNs are less than negative infinity. NaNs with - * distinct payloads are treated as distinct. Subnormal numbers are - * preserved (not flushed to zero). Positive infinity is greater than all - * numbers. Negative infinity is less than all numbers. Positive is - * greater than negative zero. There are less than k values greater than - * the kth order statistic. There are at least k values greater than or - * equal to the Kth order statistic. The semantics are not the same as - * top_k_unique. + * Computes the Kth order statistic of a data set. The current implementation uses a binary search + * requiring exactly 32 passes over the input data. The running time is linear with respect to + * input size. The median-of-medians algorithm is probably faster, but is difficult to implement + * efficiently in XLA. The implementation imposes a total ordering on floats. The ordering is + * consistent with the usual partial order. Positive NaNs are greater than positive infinity. + * Negative NaNs are less than negative infinity. NaNs with distinct payloads are treated as + * distinct. Subnormal numbers are preserved (not flushed to zero). Positive infinity is greater + * than all numbers. Negative infinity is less than all numbers. Positive is greater than negative + * zero. There are less than k values greater than the kth order statistic. There are at least k + * values greater than or equal to the Kth order statistic. The semantics are not the same as + * top_k_unique. * * @param input the input value * @param k the value of the k property @@ -3142,51 +3269,58 @@ public LookupTableExport lookupTableExp } /** - * Looks up keys in a table, outputs the corresponding values. 
- * The tensor {@code keys} must of the same type as the keys of the table. - * The output {@code values} is of the type of the table values. - *

The scalar {@code default_value} is the value output for keys not present in the - * table. It must also be of the same type as the table values. + * Looks up keys in a table, outputs the corresponding values. The tensor {@code keys} must be of + * the same type as the keys of the table. The output {@code values} is of the type of the table + * values. + * + *

The scalar {@code default_value} is the value output for keys not present in the table. It + * must also be of the same type as the table values. * * @param data type for {@code values} output * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. + * @param keys Any shape. Keys to look up. * @param defaultValue the defaultValue value * @param data type for {@code LookupTableFindV2} output and operands * @return a new instance of LookupTableFind */ - public LookupTableFind lookupTableFind(Operand tableHandle, - Operand keys, Operand defaultValue) { + public LookupTableFind lookupTableFind( + Operand tableHandle, + Operand keys, + Operand defaultValue) { return LookupTableFind.create(scope, tableHandle, keys, defaultValue); } /** - * Replaces the contents of the table with the specified keys and values. - * The tensor {@code keys} must be of the same type as the keys of the table. - * The tensor {@code values} must be of the type of the table values. + * Replaces the contents of the table with the specified keys and values. The tensor {@code keys} + * must be of the same type as the keys of the table. The tensor {@code values} must be of the + * type of the table values. * * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. + * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. * @return a new instance of LookupTableImport */ - public LookupTableImport lookupTableImport(Operand tableHandle, - Operand keys, Operand values) { + public LookupTableImport lookupTableImport( + Operand tableHandle, + Operand keys, + Operand values) { return LookupTableImport.create(scope, tableHandle, keys, values); } /** - * Updates the table to associates keys with values. - * The tensor {@code keys} must be of the same type as the keys of the table. - * The tensor {@code values} must be of the type of the table values. + * Updates the table to associate keys with values. The tensor {@code keys} must be of the same + * type as the keys of the table. The tensor {@code values} must be of the type of the table + * values. * * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. + * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. * @return a new instance of LookupTableInsert */ - public LookupTableInsert lookupTableInsert(Operand tableHandle, - Operand keys, Operand values) { + public LookupTableInsert lookupTableInsert( + Operand tableHandle, + Operand keys, + Operand values) { return LookupTableInsert.create(scope, tableHandle, keys, values); } @@ -3201,9 +3335,8 @@ public LookupTableSize lookupTableSize(Operand tableHandle) { } /** - * Forwards the input to the output. - * This operator represents the loop termination condition used by the - * "pivot" switches of a loop. + * Forwards the input to the output. This operator represents the loop termination condition used + * by the "pivot" switches of a loop. * * @param input A boolean scalar, representing the branch predicate of the Switch op. * @return a new instance of LoopCond @@ -3213,11 +3346,10 @@ public LoopCond loopCond(Operand input) { } /** - * Make all elements in the non-Batch dimension unique, but "close" to - * their initial value. Never returns a sub-normal number. Never returns - * zero. The sign of each input element is always identical to the sign - * of the corresponding output element. Behavior for infinite elements is - * undefined.
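Taken together, the find/import/insert ops above support a simple lookup workflow. A sketch, illustrative only (raw types are used because the generic parameters are elided in this diff, and the init op must be run before the find):

    HashTable table = tf.hashTable(TInt64.class, TFloat32.class);
    tf.initializeTable(table, tf.constant(new long[] {1L, 2L}), tf.constant(new float[] {0.5f, 0.8f}));
    LookupTableFind<TFloat32> found =
        tf.lookupTableFind(table, tf.constant(new long[] {2L, 99L}), tf.constant(-1f));
    // found.values() holds [0.8, -1.0]; the missing key 99 maps to the default value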
Behavior for subnormal elements is undefined. + * Make all elements in the non-Batch dimension unique, but "close" to their initial + * value. Never returns a sub-normal number. Never returns zero. The sign of each input element is + * always identical to the sign of the corresponding output element. Behavior for infinite + * elements is undefined. Behavior for subnormal elements is undefined. * * @param input the input value * @return a new instance of MakeUnique @@ -3244,15 +3376,14 @@ public MapClear mapClear(List> dtypes, MapClear.Options.. * @param options carries optional attribute values * @return a new instance of MapIncompleteSize */ - public MapIncompleteSize mapIncompleteSize(List> dtypes, - MapIncompleteSize.Options... options) { + public MapIncompleteSize mapIncompleteSize( + List> dtypes, MapIncompleteSize.Options... options) { return MapIncompleteSize.create(scope, dtypes, options); } /** - * Op peeks at the values at the specified key. If the - * underlying container does not contain this key - * this op will block until it does. + * Op peeks at the values at the specified key. If the underlying container does not contain this + * key this op will block until it does. * * @param key the key value * @param indices the indices value @@ -3260,8 +3391,11 @@ public MapIncompleteSize mapIncompleteSize(List> dtypes, * @param options carries optional attribute values * @return a new instance of MapPeek */ - public MapPeek mapPeek(Operand key, Operand indices, - List> dtypes, MapPeek.Options... options) { + public MapPeek mapPeek( + Operand key, + Operand indices, + List> dtypes, + MapPeek.Options... options) { return MapPeek.create(scope, key, indices, dtypes, options); } @@ -3281,22 +3415,24 @@ public MapSize mapSize(List> dtypes, MapSize.Options... o * * @param key int64 * @param indices the indices value - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. + * @param values a list of tensors dtypes A list of data types that inserted values should adhere + * to. * @param dtypes the value of the dtypes property * @param options carries optional attribute values * @return a new instance of MapStage */ - public MapStage mapStage(Operand key, Operand indices, - Iterable> values, List> dtypes, + public MapStage mapStage( + Operand key, + Operand indices, + Iterable> values, + List> dtypes, MapStage.Options... options) { return MapStage.create(scope, key, indices, values, dtypes, options); } /** - * Op removes and returns the values associated with the key - * from the underlying container. If the underlying container - * does not contain this key, the op will block until it does. + * Op removes and returns the values associated with the key from the underlying container. If the + * underlying container does not contain this key, the op will block until it does. * * @param key the key value * @param indices the indices value @@ -3304,52 +3440,55 @@ public MapStage mapStage(Operand key, Operand indices, * @param options carries optional attribute values * @return a new instance of MapUnstage */ - public MapUnstage mapUnstage(Operand key, Operand indices, - List> dtypes, MapUnstage.Options... options) { + public MapUnstage mapUnstage( + Operand key, + Operand indices, + List> dtypes, + MapUnstage.Options... options) { return MapUnstage.create(scope, key, indices, dtypes, options); } /** - * Op removes and returns a random (key, value) - * from the underlying container. 
If the underlying container - * does not contain elements, the op will block until it does. + * Op removes and returns a random (key, value) from the underlying container. If the underlying + * container does not contain elements, the op will block until it does. * * @param indices the indices value * @param dtypes the value of the dtypes property * @param options carries optional attribute values * @return a new instance of MapUnstageNoKey */ - public MapUnstageNoKey mapUnstageNoKey(Operand indices, - List> dtypes, MapUnstageNoKey.Options... options) { + public MapUnstageNoKey mapUnstageNoKey( + Operand indices, + List> dtypes, + MapUnstageNoKey.Options... options) { return MapUnstageNoKey.create(scope, indices, dtypes, options); } /** - * Computes the maximum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the maximum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Max} output and operands * @return a new instance of Max */ - public Max max(Operand input, Operand axis, - Max.Options... options) { + public Max max( + Operand input, Operand axis, Max.Options... options) { return Max.create(scope, input, axis, options); } /** - * Forwards the value of an available tensor from {@code inputs} to {@code output}. - * {@code Merge} waits for at least one of the tensors in {@code inputs} to become available. - * It is usually combined with {@code Switch} to implement branching. - *
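A reduction sketch for the max op above, illustrative only (assumes an in-scope {@code Ops tf}; min behaves identically apart from the op):

    Operand<TFloat32> m = tf.constant(new float[][] {{1f, 5f, 2f}, {4f, 3f, 6f}});
    Max<TFloat32> colMax = tf.max(m, tf.constant(0));
    // Reduce over axis 0: shape [3], values [4.0, 5.0, 6.0]
    Max<TFloat32> all = tf.max(m, tf.constant(new int[] {0, 1}), Max.keepDims(true));
    // keep_dims retains reduced axes with length 1: shape [1, 1], value [[6.0]]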

{@code Merge} forwards the first tensor to become available to {@code output}, and sets - * {@code value_index} to its index in {@code inputs}. + * Forwards the value of an available tensor from {@code inputs} to {@code output}. {@code Merge} + * waits for at least one of the tensors in {@code inputs} to become available. It is usually + * combined with {@code Switch} to implement branching. + * + *

{@code Merge} forwards the first tensor to become available to {@code output}, and sets + * {@code value_index} to its index in {@code inputs}. * * @param data type for {@code output} output * @param inputs The input tensors, exactly one of which will become available. @@ -3361,39 +3500,40 @@ public Merge merge(Iterable> inputs) { } /** - * Computes the minimum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the minimum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Min} output and operands * @return a new instance of Min */ - public Min min(Operand input, Operand axis, - Min.Options... options) { + public Min min( + Operand input, Operand axis, Min.Options... options) { return Min.create(scope, input, axis, options); } /** - * Pads a tensor with mirrored values. - * This operation pads a {@code input} with mirrored values according to the {@code paddings} - * you specify. {@code paddings} is an integer tensor with shape {@code [n, 2]}, where n is - * the rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D, 0]} indicates - * how many values to add before the contents of {@code input} in that dimension, and - * {@code paddings[D, 1]} indicates how many values to add after the contents of {@code input} - * in that dimension. Both {@code paddings[D, 0]} and {@code paddings[D, 1]} must be no greater - * than {@code input.dim_size(D)} (or {@code input.dim_size(D) - 1}) if {@code copy_border} is true - * (if false, respectively). - *

The padded size of each dimension D of the output is: - *

{@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} - *

For example: - *

+   * Pads a tensor with mirrored values. This operation pads an {@code input} with mirrored values
+   * according to the {@code paddings} you specify. {@code paddings} is an integer tensor with shape
+   * {@code [n, 2]}, where n is the rank of {@code input}. For each dimension D of {@code input},
+   * {@code paddings[D, 0]} indicates how many values to add before the contents of {@code input} in
+   * that dimension, and {@code paddings[D, 1]} indicates how many values to add after the contents
+   * of {@code input} in that dimension. Both {@code paddings[D, 0]} and {@code paddings[D, 1]} must
+   * be no greater than {@code input.dim_size(D)} (or {@code input.dim_size(D) - 1}) if {@code
+   * copy_border} is true (if false, respectively).
+   *
+   * 

The padded size of each dimension D of the output is: + * + *

{@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} + * + *

For example: + * + *

    *  # 't' is [[1, 2, 3], [4, 5, 6]].
    *  # 'paddings' is [[1, 1], [2, 2]].
    *  # 'mode' is SYMMETRIC.
@@ -3406,35 +3546,33 @@ public  Min min(Operand input, Operand data type for {@code output} output
    * @param input The input tensor to be padded.
-   * @param paddings A two-column matrix specifying the padding sizes. The number of
-   *  rows must be the same as the rank of {@code input}.
-   * @param mode Either {@code REFLECT} or {@code SYMMETRIC}. In reflect mode the padded regions
-   *  do not include the borders, while in symmetric mode the padded regions
-   *  do include the borders. For example, if {@code input} is {@code [1, 2, 3]} and {@code paddings}
-   *  is {@code [0, 2]}, then the output is {@code [1, 2, 3, 2, 1]} in reflect mode, and
-   *  it is {@code [1, 2, 3, 3, 2]} in symmetric mode.
+   * @param paddings A two-column matrix specifying the padding sizes. The number of rows must be
+   *     the same as the rank of {@code input}.
+   * @param mode Either {@code REFLECT} or {@code SYMMETRIC}. In reflect mode the padded regions do
+   *     not include the borders, while in symmetric mode the padded regions do include the borders.
+   *     For example, if {@code input} is {@code [1, 2, 3]} and {@code paddings} is {@code [0, 2]},
+   *     then the output is {@code [1, 2, 3, 2, 1]} in reflect mode, and it is {@code [1, 2, 3, 3,
+   *     2]} in symmetric mode.
    * @param  data type for {@code MirrorPad} output and operands
    * @return a new instance of MirrorPad
    */
-  public <T extends TType> MirrorPad<T> mirrorPad(Operand<T> input,
-      Operand<? extends TNumber> paddings, String mode) {
+  public <T extends TType> MirrorPad<T> mirrorPad(
+      Operand<T> input, Operand<? extends TNumber> paddings, String mode) {
     return MirrorPad.create(scope, input, paddings, mode);
   }
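The REFLECT example above as a sketch, illustrative only:

    Operand<TInt32> t = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
    Operand<TInt32> paddings = tf.constant(new int[][] {{1, 1}, {2, 2}});
    MirrorPad<TInt32> padded = tf.mirrorPad(t, paddings, "REFLECT");
    // Output shape is [1+2+1, 2+3+2] = [4, 7]; REFLECT excludes the border values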
 
   /**
-   * Wraps an arbitrary MLIR computation expressed as a module with a main() function.
-   *  This operation does not have an associated kernel and is not intended to be
-   *  executed in a regular TensorFlow session. Instead it is intended to be used for
-   *  testing or for special case where a user intends to pass custom MLIR computation
-   *  through a TensorFlow graph with the intent of having custom tooling processing
-   *  it downstream (when targeting a different environment, like TensorFlow lite for
-   *  example).
-   *  The MLIR module is expected to have a main() function that will be used as an
-   *  entry point. The inputs to the operations will be passed as argument to the
-   *  main() function and the returned values of the main function mapped to the
-   *  outputs.
-   *  Example usage:
-   *  
+   * Wraps an arbitrary MLIR computation expressed as a module with a main() function. This
+   * operation does not have an associated kernel and is not intended to be executed in a regular
+   * TensorFlow session. Instead it is intended to be used for testing or for special cases where a
+   * user intends to pass custom MLIR computation through a TensorFlow graph with the intent of
+   * having custom tooling process it downstream (when targeting a different environment, like
+   * TensorFlow lite for example). The MLIR module is expected to have a main() function that will
+   * be used as an entry point. The inputs to the operations will be passed as arguments to the
+   * main() function and the returned values of the main function are mapped to the outputs. Example
+   * usage:
+   *
+   * 
    *  import tensorflow as tf
    *  from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
    *
@@ -3457,21 +3595,21 @@ public  MirrorPad mirrorPad(Operand input,
    * @param Toutputs the value of the Toutputs property
    * @return a new instance of MlirPassthroughOp
    */
-  public MlirPassthroughOp mlirPassthroughOp(Iterable<Operand<?>> inputs, String mlirModule,
-      List<Class<? extends TType>> Toutputs) {
+  public MlirPassthroughOp mlirPassthroughOp(
+      Iterable<Operand<?>> inputs, String mlirModule, List<Class<? extends TType>> Toutputs) {
     return MlirPassthroughOp.create(scope, inputs, mlirModule, Toutputs);
   }
 
   /**
-   * Creates an empty hash table that uses tensors as the backing store.
-   *  It uses "open addressing" with quadratic reprobing to resolve
-   *  collisions.
-   *  

This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * Creates an empty hash table that uses tensors as the backing store. It uses "open + * addressing" with quadratic reprobing to resolve collisions. + * + *

This op creates a mutable hash table, specifying the type of its keys and values. Each value + * must be a scalar. Data can be inserted into the table using the insert operations. It does not + * support the initialization operation. * - * @param emptyKey The key used to represent empty key buckets internally. Must not - * be used in insert or lookup operations. + * @param emptyKey The key used to represent empty key buckets internally. Must not be used in + * insert or lookup operations. * @param deletedKey the deletedKey value * @param valueDtype Type of the table values. * @param options carries optional attribute values @@ -3480,16 +3618,17 @@ public MlirPassthroughOp mlirPassthroughOp(Iterable> inputs, String m * @return a new instance of MutableDenseHashTable */ public MutableDenseHashTable mutableDenseHashTable( - Operand emptyKey, Operand deletedKey, Class valueDtype, + Operand emptyKey, + Operand deletedKey, + Class valueDtype, MutableDenseHashTable.Options... options) { return MutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); } /** - * Creates an empty hash table. - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * Creates an empty hash table. This op creates a mutable hash table, specifying the type of its + * keys and values. Each value must be a scalar. Data can be inserted into the table using the + * insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. @@ -3498,16 +3637,15 @@ public MutableDenseHashTable mutableDenseHash * @param data type for {@code MutableHashTableV2} output and operands * @return a new instance of MutableHashTable */ - public MutableHashTable mutableHashTable(Class keyDtype, - Class valueDtype, MutableHashTable.Options... options) { + public MutableHashTable mutableHashTable( + Class keyDtype, Class valueDtype, MutableHashTable.Options... options) { return MutableHashTable.create(scope, keyDtype, valueDtype, options); } /** - * Creates an empty hash table. - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a vector. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * Creates an empty hash table. This op creates a mutable hash table, specifying the type of its + * keys and values. Each value must be a vector. Data can be inserted into the table using the + * insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. @@ -3532,11 +3670,13 @@ public Mutex mutex(Mutex.Options... options) { } /** - * Locks a mutex resource. The output is the lock. So long as the lock tensor - * is alive, any other request to use {@code MutexLock} with this mutex will wait. - *

This is particularly useful for creating a critical section when used in - * conjunction with {@code MutexLockIdentity}: - *

+   * Locks a mutex resource. The output is the lock. So long as the lock tensor is alive, any other
+   * request to use {@code MutexLock} with this mutex will wait.
+   *
+   * 

This is particularly useful for creating a critical section when used in conjunction with + * {@code MutexLockIdentity}: + * + *

    *
    *  mutex = mutex_v2(
    *    shared_name=handle_name, container=container, name=name)
@@ -3558,14 +3698,16 @@ public Mutex mutex(Mutex.Options... options) {
    *    with ops.control_dependencies([ensure_lock_exists]):
    *      return nest.map_structure(tf.identity, r)
    *  
- *

While {@code fn} is running in the critical section, no other functions which wish to - * use this critical section may run. - *

Often the use case is that two executions of the same graph, in parallel, - * wish to run {@code fn}; and we wish to ensure that only one of them executes - * at a time. This is especially important if {@code fn} modifies one or more - * variables at a time. - *

It is also useful if two separate functions must share a resource, but we - * wish to ensure the usage is exclusive. + * + *

While {@code fn} is running in the critical section, no other functions which wish to use + * this critical section may run. + * + *

Often the use case is that two executions of the same graph, in parallel, wish to run {@code + * fn}; and we wish to ensure that only one of them executes at a time. This is especially + * important if {@code fn} modifies one or more variables at a time. + * + *

It is also useful if two separate functions must share a resource, but we wish to ensure the + * usage is exclusive. * * @param mutex The mutex resource to lock. * @return a new instance of MutexLock @@ -3596,52 +3738,65 @@ public NoOp noOp() { } /** - * Returns a one-hot tensor. - * The locations represented by indices in {@code indices} take value {@code on_value}, - * while all other locations take value {@code off_value}. - *

If the input {@code indices} is rank {@code N}, the output will have rank {@code N+1}, - * The new axis is created at dimension {@code axis} (default: the new axis is - * appended at the end). - *

If {@code indices} is a scalar the output shape will be a vector of length {@code depth}. - *

If {@code indices} is a vector of length {@code features}, the output shape will be: - *

+   * Returns a one-hot tensor. The locations represented by indices in {@code indices} take value
+   * {@code on_value}, while all other locations take value {@code off_value}.
+   *
+   * 

If the input {@code indices} is rank {@code N}, the output will have rank {@code N+1}, The + * new axis is created at dimension {@code axis} (default: the new axis is appended at the end). + * + *

If {@code indices} is a scalar the output shape will be a vector of length {@code depth}. + * + *

If {@code indices} is a vector of length {@code features}, the output shape will be: + * + *

    *    features x depth if axis == -1
    *    depth x features if axis == 0
    *  
- *

If {@code indices} is a matrix (batch) with shape {@code [batch, features]}, - * the output shape will be: - *

+   *
+   * 

If {@code indices} is a matrix (batch) with shape {@code [batch, features]}, the output + * shape will be: + * + *

    *    batch x features x depth if axis == -1
    *    batch x depth x features if axis == 1
    *    depth x batch x features if axis == 0
    *  
- * Examples
- *

Suppose that - *

+   *
+   * Examples
+ * + *

Suppose that + * + *

    *    indices = [0, 2, -1, 1]
    *    depth = 3
    *    on_value = 5.0
    *    off_value = 0.0
    *    axis = -1
    *  
- *

Then output is {@code [4 x 3]}: - *

+   *
+   * 

Then output is {@code [4 x 3]}: + * + *

    *  output =
    *    [5.0 0.0 0.0]  // one_hot(0)
    *    [0.0 0.0 5.0]  // one_hot(2)
    *    [0.0 0.0 0.0]  // one_hot(-1)
    *    [0.0 5.0 0.0]  // one_hot(1)
    *  
- *

Suppose that - *

+   *
+   * 

Suppose that + * + *

    *    indices = [0, 2, -1, 1]
    *    depth = 3
    *    on_value = 0.0
    *    off_value = 3.0
    *    axis = 0
    *  
- *

Then output is {@code [3 x 4]}: - *

+   *
+   * 

Then output is {@code [3 x 4]}: + * + *

    *  output =
    *    [0.0 3.0 3.0 3.0]
    *    [3.0 3.0 3.0 0.0]
@@ -3652,16 +3807,20 @@ public NoOp noOp() {
    *  //          ^        one_hot(-1)
    *  //              ^    one_hot(1)
    *  
- *

Suppose that - *

+   *
+   * 

Suppose that + * + *

    *    indices = [[0, 2], [1, -1]]
    *    depth = 3
    *    on_value = 1.0
    *    off_value = 0.0
    *    axis = -1
    *  
- *

Then output is {@code [2 x 2 x 3]}: - *

+   *
+   * 

Then output is {@code [2 x 2 x 3]}: + * + *

    *  output =
    *    [
    *      [1.0, 0.0, 0.0]  // one_hot(0)
@@ -3681,8 +3840,12 @@ public NoOp noOp() {
    * @param  data type for {@code OneHot} output and operands
    * @return a new instance of OneHot
    */
-  public <U extends TType, T extends TNumber> OneHot<U> oneHot(Operand<T> indices,
-      Operand<TInt32> depth, Operand<U> onValue, Operand<U> offValue, OneHot.Options... options) {
+  public <U extends TType, T extends TNumber> OneHot<U> oneHot(
+      Operand<T> indices,
+      Operand<TInt32> depth,
+      Operand<U> onValue,
+      Operand<U> offValue,
+      OneHot.Options... options) {
     return OneHot.create(scope, indices, depth, onValue, offValue, options);
   }
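The first one-hot example above as a sketch, illustrative only:

    Operand<TInt32> indices = tf.constant(new int[] {0, 2, -1, 1});
    OneHot<TFloat32> oneHot =
        tf.oneHot(indices, tf.constant(3), tf.constant(5.0f), tf.constant(0.0f));
    // depth 3, on_value 5.0, off_value 0.0, default axis -1: the [4 x 3] output shown
    // above, with the third row all off_value because index -1 matches no position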
 
@@ -3717,8 +3880,8 @@ public  OnesLike onesLike(Operand x) {
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapClear
    */
-  public OrderedMapClear orderedMapClear(List<Class<? extends TType>> dtypes,
-      OrderedMapClear.Options... options) {
+  public OrderedMapClear orderedMapClear(
+      List<Class<? extends TType>> dtypes, OrderedMapClear.Options... options) {
     return OrderedMapClear.create(scope, dtypes, options);
   }
 
@@ -3729,16 +3892,14 @@ public OrderedMapClear orderedMapClear(List> dtypes,
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapIncompleteSize
    */
-  public OrderedMapIncompleteSize orderedMapIncompleteSize(List<Class<? extends TType>> dtypes,
-      OrderedMapIncompleteSize.Options... options) {
+  public OrderedMapIncompleteSize orderedMapIncompleteSize(
+      List<Class<? extends TType>> dtypes, OrderedMapIncompleteSize.Options... options) {
     return OrderedMapIncompleteSize.create(scope, dtypes, options);
   }
 
   /**
-   * Op peeks at the values at the specified key.  If the
-   *  underlying container does not contain this key
-   *  this op will block until it does.   This Op is optimized for
-   *  performance.
+   * Op peeks at the values at the specified key. If the underlying container does not contain this
+   * key, this op will block until it does. This Op is optimized for performance.
    *
    * @param key the key value
    * @param indices the indices value
@@ -3746,8 +3907,11 @@ public OrderedMapIncompleteSize orderedMapIncompleteSize(List key, Operand indices,
-      List<Class<? extends TType>> dtypes, OrderedMapPeek.Options... options) {
+  public OrderedMapPeek orderedMapPeek(
+      Operand<TInt64> key,
+      Operand<TInt32> indices,
+      List<Class<? extends TType>> dtypes,
+      OrderedMapPeek.Options... options) {
     return OrderedMapPeek.create(scope, key, indices, dtypes, options);
   }
 
@@ -3758,33 +3922,35 @@ public OrderedMapPeek orderedMapPeek(Operand key, Operand indice
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapSize
    */
-  public OrderedMapSize orderedMapSize(List<Class<? extends TType>> dtypes,
-      OrderedMapSize.Options... options) {
+  public OrderedMapSize orderedMapSize(
+      List<Class<? extends TType>> dtypes, OrderedMapSize.Options... options) {
     return OrderedMapSize.create(scope, dtypes, options);
   }
 
   /**
-   * Stage (key, values) in the underlying container which behaves like a ordered
-   *  associative container.   Elements are ordered by key.
+   * Stage (key, values) in the underlying container which behaves like an ordered associative
+   * container. Elements are ordered by key.
    *
    * @param key int64
    * @param indices the indices value
-   * @param values a list of tensors
-   *  dtypes A list of data types that inserted values should adhere to.
+   * @param values a list of tensors; {@code dtypes} gives the data types that inserted values
+   *     should adhere to.
    * @param dtypes the value of the dtypes property
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapStage
    */
-  public OrderedMapStage orderedMapStage(Operand<TInt64> key, Operand<TInt32> indices,
-      Iterable<Operand<?>> values, List<Class<? extends TType>> dtypes,
+  public OrderedMapStage orderedMapStage(
+      Operand<TInt64> key,
+      Operand<TInt32> indices,
+      Iterable<Operand<?>> values,
+      List<Class<? extends TType>> dtypes,
       OrderedMapStage.Options... options) {
     return OrderedMapStage.create(scope, key, indices, values, dtypes, options);
   }
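A stage/unstage round trip, illustrative only (the explicit type witnesses exist just to satisfy Java generics, and the unstage blocks until the key is present):

    List<Class<? extends TType>> dtypes = Arrays.<Class<? extends TType>>asList(TFloat32.class);
    tf.orderedMapStage(tf.constant(1L), tf.constant(0),
        Arrays.<Operand<?>>asList(tf.constant(3.14f)), dtypes);
    OrderedMapUnstage got = tf.orderedMapUnstage(tf.constant(1L), tf.constant(0), dtypes);
    // got.values() yields the staged tensors; keys are ordered, so key 1 is the smallest here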
 
   /**
-   * Op removes and returns the values associated with the key
-   *  from the underlying container.   If the underlying container
-   *  does not contain this key, the op will block until it does.
+   * Op removes and returns the values associated with the key from the underlying container. If the
+   * underlying container does not contain this key, the op will block until it does.
    *
    * @param key the key value
    * @param indices the indices value
@@ -3792,39 +3958,47 @@ public OrderedMapStage orderedMapStage(Operand key, Operand indi
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapUnstage
    */
-  public OrderedMapUnstage orderedMapUnstage(Operand<TInt64> key, Operand<TInt32> indices,
-      List<Class<? extends TType>> dtypes, OrderedMapUnstage.Options... options) {
+  public OrderedMapUnstage orderedMapUnstage(
+      Operand<TInt64> key,
+      Operand<TInt32> indices,
+      List<Class<? extends TType>> dtypes,
+      OrderedMapUnstage.Options... options) {
     return OrderedMapUnstage.create(scope, key, indices, dtypes, options);
   }
 
   /**
-   * Op removes and returns the (key, value) element with the smallest
-   *  key from the underlying container.   If the underlying container
-   *  does not contain elements, the op will block until it does.
+   * Op removes and returns the (key, value) element with the smallest key from the underlying
+   * container. If the underlying container does not contain elements, the op will block until it
+   * does.
    *
    * @param indices the indices value
    * @param dtypes the value of the dtypes property
    * @param options carries optional attribute values
    * @return a new instance of OrderedMapUnstageNoKey
    */
-  public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand<TInt32> indices,
-      List<Class<? extends TType>> dtypes, OrderedMapUnstageNoKey.Options... options) {
+  public OrderedMapUnstageNoKey orderedMapUnstageNoKey(
+      Operand<TInt32> indices,
+      List<Class<? extends TType>> dtypes,
+      OrderedMapUnstageNoKey.Options... options) {
     return OrderedMapUnstageNoKey.create(scope, indices, dtypes, options);
   }
 
   /**
-   * Pads a tensor.
-   *  This operation pads {@code input} according to the {@code paddings} and {@code constant_values}
-   *  you specify. {@code paddings} is an integer tensor with shape {@code [Dn, 2]}, where n is
-   *  the rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D, 0]} indicates
-   *  how many padding values to add before the contents of {@code input} in that dimension,
-   *  and {@code paddings[D, 1]} indicates how many padding values to add after the contents
-   *  of {@code input} in that dimension. {@code constant_values} is a scalar tensor of the same
-   *  type as {@code input} that indicates the value to use for padding {@code input}.
-   *  

The padded size of each dimension D of the output is: - *

{@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} - *

For example: - *

+   * Pads a tensor. This operation pads {@code input} according to the {@code paddings} and {@code
+   * constant_values} you specify. {@code paddings} is an integer tensor with shape {@code [Dn, 2]},
+   * where n is the rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D,
+   * 0]} indicates how many padding values to add before the contents of {@code input} in that
+   * dimension, and {@code paddings[D, 1]} indicates how many padding values to add after the
+   * contents of {@code input} in that dimension. {@code constant_values} is a scalar tensor of the
+   * same type as {@code input} that indicates the value to use for padding {@code input}.
+   *
+   * <p>The padded size of each dimension D of the output is:
+   *
+   * <p>{@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)}
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 't' is [[1, 1], [2, 2]]
    *  # 'paddings' is [[1, 1], [2, 2]]
    *  # 'constant_values' is 0
@@ -3842,66 +4016,76 @@ public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand<TInt32> indices,
   * @param <T> data type for {@code PadV2} output and operands
    * @return a new instance of Pad
    */
-  public <T extends TType> Pad<T> pad(Operand<T> input, Operand<? extends TNumber> paddings,
-      Operand<T> constantValues) {
+  public <T extends TType> Pad<T> pad(
+      Operand<T> input, Operand<? extends TNumber> paddings, Operand<T> constantValues) {
     return Pad.create(scope, input, paddings, constantValues);
   }
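Editor's note (usage sketch, not part of the patch): following the {@code Ops} example in the
class javadoc, the reformatted {@code pad} wrapper reproduces the javadoc example above; imports
from org.tensorflow, org.tensorflow.op.core and org.tensorflow.types are elided.

    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // 't' is [[1, 1], [2, 2]] and 'paddings' is [[1, 1], [2, 2]]
      Operand<TInt32> t = tf.constant(new int[][] {{1, 1}, {2, 2}});
      Operand<TInt32> paddings = tf.constant(new int[][] {{1, 1}, {2, 2}});
      // Each dimension D grows by paddings(D, 0) + paddings(D, 1), so the padded
      // shape is [1 + 2 + 1, 2 + 2 + 2] = [4, 6], filled with the constant 0.
      Pad<TInt32> padded = tf.pad(t, paddings, tf.constant(0));
    }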
 
   /**
-   * Concatenates a list of {@code N} tensors along the first dimension.
-   *  The input tensors are all required to have size 1 in the first dimension.
-   *  <p>For example:
-   *  <pre>
+   * Concatenates a list of {@code N} tensors along the first dimension. The input tensors are all
+   * required to have size 1 in the first dimension.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 'x' is [[1, 4]]
    *  # 'y' is [[2, 5]]
    *  # 'z' is [[3, 6]]
    *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
    *  </pre>
-   *  <p>The difference between concat and parallel_concat is that concat requires all
-   *  of the inputs be computed before the operation will begin but doesn't require
-   *  that the input shapes be known during graph construction. Parallel concat
-   *  will copy pieces of the input into the output as they become available, in
-   *  some situations this can provide a performance benefit.
+   *
+   * <p>The difference between concat and parallel_concat is that concat requires all of the inputs
+   * be computed before the operation will begin but doesn't require that the input shapes be known
+   * during graph construction. Parallel concat will copy pieces of the input into the output as
+   * they become available, in some situations this can provide a performance benefit.
    *
    * @param <T> data type for {@code output} output
-   * @param values Tensors to be concatenated. All must have size 1 in the first dimension
-   *  and same shape.
-   * @param shape the final shape of the result; should be equal to the shapes of any input
-   *  but with the number of input values in the first dimension.
+   * @param values Tensors to be concatenated. All must have size 1 in the first dimension and same
+   *     shape.
+   * @param shape the final shape of the result; should be equal to the shapes of any input but with
+   *     the number of input values in the first dimension.
    * @param <T> data type for {@code ParallelConcat} output and operands
    * @return a new instance of ParallelConcat
    */
-  public <T extends TType> ParallelConcat<T> parallelConcat(Iterable<Operand<T>> values,
-      Shape shape) {
+  public <T extends TType> ParallelConcat<T> parallelConcat(
+      Iterable<Operand<T>> values, Shape shape) {
     return ParallelConcat.create(scope, values, shape);
   }
 
   /**
-   * Interleave the values from the {@code data} tensors into a single tensor.
-   *  Builds a merged tensor such that
-   *  <pre>
+   * Interleave the values from the {@code data} tensors into a single tensor. Builds a merged
+   * tensor such that
+   *
+   * <pre>
    *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
    *  </pre>
-   *  <p>For example, if each {@code indices[m]} is scalar or vector, we have
-   *  <pre>
+   *
+   * <p>For example, if each {@code indices[m]} is scalar or vector, we have
+   *
+   * <pre>
    *      # Scalar indices:
    *      merged[indices[m], ...] = data[m][...]
    *
    *      # Vector indices:
    *      merged[indices[m][i], ...] = data[m][i, ...]
    *  
- *

Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, - * and the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we - * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this - * {@code constant}, the output shape is - *

+   *
+   * 

Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, and + * the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we must have + * {@code data[i].shape = indices[i].shape + constant}. In terms of this {@code constant}, the + * output shape is + * + *

    *  merged.shape = [max(indices)] + constant
    *  
- *

Values may be merged in parallel, so if an index appears in both {@code indices[m][i]} - * and {@code indices[n][j]}, the result may be invalid. This differs from the normal - * DynamicStitch operator that defines the behavior in that case. - *

For example: - *

+   *
+   * 

Values may be merged in parallel, so if an index appears in both {@code indices[m][i]} and + * {@code indices[n][j]}, the result may be invalid. This differs from the normal DynamicStitch + * operator that defines the behavior in that case. + * + *

For example: + * + *

    *      indices[0] = 6
    *      indices[1] = [4, 1]
    *      indices[2] = [[5, 2], [0, 3]]
@@ -3911,9 +4095,11 @@ public <T extends TType> ParallelConcat<T> parallelConcat(Iterable<Operand<T>> v
    *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
    *                [51, 52], [61, 62]]
    *  
- *

This method can be used to merge partitions created by {@code dynamic_partition} - * as illustrated on the following example: - *

+   *
+   * 

This method can be used to merge partitions created by {@code dynamic_partition} as + * illustrated on the following example: + * + *

    *      # Apply function (increments x_i) on elements for which a certain condition
    *      # apply (x_i != -1 in this example).
    *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
@@ -3927,9 +4113,9 @@ public <T extends TType> ParallelConcat<T> parallelConcat(Iterable<Operand<T>> v
    *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
    *      # unchanged.
    *  </pre>
-   *  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-   *  <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
-   *  </div>
+   *
+   * <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+   * <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+   * </div>
    *
    * @param <T> data type for {@code merged} output
    * @param indices the indices value
@@ -3945,30 +4131,35 @@ public <T extends TType> ParallelDynamicStitch<T> parallelDynamicStitch(
   /**
    * returns {@code f(inputs)}, where {@code f}'s body is placed and partitioned.
    *
-   * <p>Selects between {@link StatefulPartitionedCall} and {@link StatelessPartitionedCall} based on the statefulness of the function arguments.
+   * <p>Selects between {@link StatefulPartitionedCall} and {@link StatelessPartitionedCall} based
+   * on the statefulness of the function arguments.
    *
    * @param args A list of input tensors.
    * @param Tout A list of output types.
-   * @param f <pre>
+   * @param f
+   *     <pre>
    *    A function that takes 'args', a list of tensors, and returns 'output',
    *    another list of tensors. Input and output types are specified by 'Tin'
    *    and 'Tout'. The function body of f will be placed and partitioned across
    *    devices, setting this op apart from the regular Call op. This op is
    *    stateful.
    *  </pre>
+   *
    * @param options carries optional attribute values
    * @return a new instance of PartitionedCall
    */
-  public PartitionedCall partitionedCall(Iterable<Operand<?>> args,
-      List<Class<? extends TType>> Tout, ConcreteFunction f, PartitionedCall.Options... options) {
+  public PartitionedCall partitionedCall(
+      Iterable<Operand<?>> args,
+      List<Class<? extends TType>> Tout,
+      ConcreteFunction f,
+      PartitionedCall.Options... options) {
     return PartitionedCall.create(scope, args, Tout, f, options);
   }
 
   /**
-   * A placeholder op for a value that will be fed into the computation.
-   *  N.B. This operation will fail with an error if it is executed. It is
-   *  intended as a way to represent a value that will always be fed, and to
-   *  provide attrs that enable the fed value to be checked at runtime.
+   * A placeholder op for a value that will be fed into the computation. N.B. This operation will
+   * fail with an error if it is executed. It is intended as a way to represent a value that will
+   * always be fed, and to provide attrs that enable the fed value to be checked at runtime.
    *
    * @param <T> data type for {@code output} output
    * @param dtype The type of elements in the tensor.
@@ -3976,8 +4167,8 @@ public PartitionedCall partitionedCall(Iterable<Operand<?>> args,
    * @param <T> data type for {@code Placeholder} output and operands
    * @return a new instance of Placeholder
    */
-  public <T extends TType> Placeholder<T> placeholder(Class<T> dtype,
-      Placeholder.Options... options) {
+  public <T extends TType> Placeholder<T> placeholder(
+      Class<T> dtype, Placeholder.Options... options) {
     return Placeholder.create(scope, dtype, options);
   }
 
@@ -3990,14 +4181,13 @@ public <T extends TType> Placeholder<T> placeholder(Class<T> dtype,
    * @param <T> data type for {@code PlaceholderWithDefault} output and operands
    * @return a new instance of PlaceholderWithDefault
    */
-  public <T extends TType> PlaceholderWithDefault<T> placeholderWithDefault(Operand<T> input,
-      Shape shape) {
+  public <T extends TType> PlaceholderWithDefault<T> placeholderWithDefault(
+      Operand<T> input, Shape shape) {
     return PlaceholderWithDefault.create(scope, input, shape);
   }
 
   /**
-   * Prints a string scalar.
-   *  Prints a string scalar to the desired output_stream.
+   * Prints a string scalar. Prints a string scalar to the desired output_stream.
    *
    * @param input The string scalar to print.
    * @param options carries optional attribute values
@@ -4008,22 +4198,20 @@ public Print print(Operand<TString> input, Print.Options... options) {
   }
 
   /**
-   * Computes the product of elements across dimensions of a tensor.
-   *  Reduces {@code input} along the dimensions given in {@code axis}. Unless
-   *  {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in
-   *  {@code axis}. If {@code keep_dims} is true, the reduced dimensions are
-   *  retained with length 1.
+   * Computes the product of elements across dimensions of a tensor. Reduces {@code input} along the
+   * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is
+   * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced
+   * dimensions are retained with length 1.
    *
    * @param <T> data type for {@code output} output
    * @param input The tensor to reduce.
-   * @param axis The dimensions to reduce. Must be in the range
-   *  {@code [-rank(input), rank(input))}.
+   * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}.
    * @param options carries optional attribute values
    * @param <T> data type for {@code Prod} output and operands
    * @return a new instance of Prod
    */
-  public <T extends TType> Prod<T> prod(Operand<T> input, Operand<? extends TNumber> axis,
-      Prod.Options... options) {
+  public <T extends TType> Prod<T> prod(
+      Operand<T> input, Operand<? extends TNumber> axis, Prod.Options... options) {
     return Prod.create(scope, input, axis, options);
   }
 
@@ -4038,17 +4226,21 @@ public <T extends TType> Prod<T> prod(Operand<T> input, Operand<? extends TNumber> axis,
    * @param <T> data type for {@code QuantizedReshape} output and operands
    * @return a new instance of QuantizedReshape
    */
-  public <T extends TType> QuantizedReshape<T> quantizedReshape(Operand<T> tensor,
-      Operand<? extends TNumber> shape, Operand<TFloat32> inputMin, Operand<TFloat32> inputMax) {
+  public <T extends TType> QuantizedReshape<T> quantizedReshape(
+      Operand<T> tensor,
+      Operand<? extends TNumber> shape,
+      Operand<TFloat32> inputMin,
+      Operand<TFloat32> inputMax) {
     return QuantizedReshape.create(scope, tensor, shape, inputMin, inputMax);
   }
 
   /**
-   * Creates a sequence of numbers.
-   *  This operation creates a sequence of numbers that begins at {@code start} and
-   *  extends by increments of {@code delta} up to but not including {@code limit}.
-   *  <p>For example:
-   *  <pre>
+   * Creates a sequence of numbers. This operation creates a sequence of numbers that begins at
+   * {@code start} and extends by increments of {@code delta} up to but not including {@code limit}.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 'start' is 3
    *  # 'limit' is 18
    *  # 'delta' is 3
@@ -4067,17 +4259,20 @@ public <T extends TNumber> Range<T> range(Operand<T> start, Operand<T> limit, Op
   }
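Editor's note (usage sketch, not part of the patch): with {@code tf} created as in the
{@code pad} sketch above, {@code range} reproduces the javadoc numbers directly.

    // 'start' is 3, 'limit' is 18, 'delta' is 3 ==> [3, 6, 9, 12, 15]
    Range<TInt32> r = tf.range(tf.constant(3), tf.constant(18), tf.constant(3));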
 
   /**
-   * Returns the rank of a tensor.
-   *  This operation returns an integer representing the rank of {@code input}.
-   *  <p>For example:
-   *  <pre>
+   * Returns the rank of a tensor. This operation returns an integer representing the rank of {@code
+   * input}.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    *  # shape of tensor 't' is [2, 2, 3]
    *  rank(t) ==> 3
    *  
- *

Note: The rank of a tensor is not the same as the rank of a matrix. The rank - * of a tensor is the number of indices required to uniquely select each element - * of the tensor. Rank is also known as "order", "degree", or "ndims." + * + *

Note: The rank of a tensor is not the same as the rank of a matrix. The + * rank of a tensor is the number of indices required to uniquely select each element of the + * tensor. Rank is also known as "order", "degree", or "ndims." * * @param input the input value * @return a new instance of Rank @@ -4087,12 +4282,11 @@ public Rank rank(Operand input) { } /** - * Reads the value of a variable. - * The tensor returned by this operation is immutable. - *

The value returned by this operation is guaranteed to be influenced by all the - * writes on which this operation depends directly or indirectly, and to not be - * influenced by any of the writes which depend directly or indirectly on this - * operation. + * Reads the value of a variable. The tensor returned by this operation is immutable. + * + *

The value returned by this operation is guaranteed to be influenced by all the writes on + * which this operation depends directly or indirectly, and to not be influenced by any of the + * writes which depend directly or indirectly on this operation. * * @param data type for {@code value} output * @param resource handle to the resource in which to store the variable. @@ -4100,124 +4294,112 @@ public Rank rank(Operand input) { * @param data type for {@code ReadVariableOp} output and operands * @return a new instance of ReadVariableOp */ - public ReadVariableOp readVariableOp(Operand resource, - Class dtype) { + public ReadVariableOp readVariableOp( + Operand resource, Class dtype) { return ReadVariableOp.create(scope, resource, dtype); } /** - * Computes the "logical and" of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the "logical and" of elements across dimensions of a tensor. Reduces {@code + * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank + * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the + * reduced dimensions are retained with length 1. * * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @return a new instance of ReduceAll */ - public ReduceAll reduceAll(Operand input, Operand axis, - ReduceAll.Options... options) { + public ReduceAll reduceAll( + Operand input, Operand axis, ReduceAll.Options... options) { return ReduceAll.create(scope, input, axis, options); } /** - * Computes the "logical or" of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the "logical or" of elements across dimensions of a tensor. Reduces {@code + * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank + * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the + * reduced dimensions are retained with length 1. * * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @return a new instance of ReduceAny */ - public ReduceAny reduceAny(Operand input, Operand axis, - ReduceAny.Options... options) { + public ReduceAny reduceAny( + Operand input, Operand axis, ReduceAny.Options... options) { return ReduceAny.create(scope, input, axis, options); } /** - * Computes the maximum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. 
If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the maximum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Max} output and operands * @return a new instance of ReduceMax */ - public ReduceMax reduceMax(Operand input, - Operand axis, ReduceMax.Options... options) { + public ReduceMax reduceMax( + Operand input, Operand axis, ReduceMax.Options... options) { return ReduceMax.create(scope, input, axis, options); } /** - * Computes the minimum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the minimum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Min} output and operands * @return a new instance of ReduceMin */ - public ReduceMin reduceMin(Operand input, - Operand axis, ReduceMin.Options... options) { + public ReduceMin reduceMin( + Operand input, Operand axis, ReduceMin.Options... options) { return ReduceMin.create(scope, input, axis, options); } /** - * Computes the product of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the product of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. 
* @param options carries optional attribute values * @param data type for {@code Prod} output and operands * @return a new instance of ReduceProd */ - public ReduceProd reduceProd(Operand input, - Operand axis, ReduceProd.Options... options) { + public ReduceProd reduceProd( + Operand input, Operand axis, ReduceProd.Options... options) { return ReduceProd.create(scope, input, axis, options); } /** - * Computes the sum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the sum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Sum} output and operands * @return a new instance of ReduceSum */ - public ReduceSum reduceSum(Operand input, Operand axis, - ReduceSum.Options... options) { + public ReduceSum reduceSum( + Operand input, Operand axis, ReduceSum.Options... options) { return ReduceSum.create(scope, input, axis, options); } @@ -4242,16 +4424,17 @@ public RefNextIteration refNextIteration(Operand data) { * @param data type for {@code RefSelect} output and operands * @return a new instance of RefSelect */ - public RefSelect refSelect(Operand index, - Iterable> inputs) { + public RefSelect refSelect( + Operand index, Iterable> inputs) { return RefSelect.create(scope, index, inputs); } /** - * Forwards the ref tensor {@code data} to the output port determined by {@code pred}. - * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, - * the data goes to {@code output_false}. - *

See also {@code Switch} and {@code Merge}. + * Forwards the ref tensor {@code data} to the output port determined by {@code pred}. If {@code + * pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, the data + * goes to {@code output_false}. + * + *

See also {@code Switch} and {@code Merge}. * * @param data type for {@code output_false} output * @param data The ref tensor to be forwarded to the appropriate output. @@ -4272,25 +4455,31 @@ public RefSwitch refSwitch(Operand data, Operand * @param f The function to run remotely. * @return a new instance of RemoteCall */ - public RemoteCall remoteCall(Operand target, Iterable> args, - List> Tout, ConcreteFunction f) { + public RemoteCall remoteCall( + Operand target, + Iterable> args, + List> Tout, + ConcreteFunction f) { return RemoteCall.create(scope, target, args, Tout, f); } /** - * Reshapes a tensor. - * Given {@code tensor}, this operation returns a tensor that has the same values - * as {@code tensor} with shape {@code shape}. - *

If one component of 1-D tensor {@code shape} is the special value -1, the size of that - * dimension is computed so that the total size remains constant. In particular, a - * {@code shape} of {@code [-1]} flattens into 1-D. At most one component of {@code shape} may be - * unknown. - *

The {@code shape} must be 1-D and the operation returns a tensor with shape - * {@code shape} filled with the values of {@code tensor}. In this case, the number of elements - * implied by {@code shape} must be the same as the number of elements in {@code tensor}. - *

It is an error if {@code shape} is not 1-D. - *

For example: - *

+   * Reshapes a tensor. Given {@code tensor}, this operation returns a tensor that has the same
+   * values as {@code tensor} with shape {@code shape}.
+   *
+   * <p>If one component of 1-D tensor {@code shape} is the special value -1, the size of that
+   * dimension is computed so that the total size remains constant. In particular, a {@code shape}
+   * of {@code [-1]} flattens into 1-D. At most one component of {@code shape} may be unknown.
+   *
+   * <p>The {@code shape} must be 1-D and the operation returns a tensor with shape {@code shape}
+   * filled with the values of {@code tensor}. In this case, the number of elements implied by
+   * {@code shape} must be the same as the number of elements in {@code tensor}.
+   *
+   * <p>It is an error if {@code shape} is not 1-D.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
    *  # tensor 't' has shape [9]
    *  reshape(t, [3, 3]) ==> [[1, 2, 3],
@@ -4349,8 +4538,8 @@ public <T extends TType> Reshape<T> reshape(Operand<T> tensor, Operand<? extends TNumber> shape) {
    * @param <T> data type for {@code output} output
    * @param resource Should be from a scalar {@code Variable} node.
-   * @param limit If incrementing ref would bring it above limit, instead generates an
-   *  'OutOfRange' error.
+   * @param limit If incrementing ref would bring it above limit, instead generates an 'OutOfRange'
+   *     error.
    * @param T the value of the T property
   * @param <T> data type for {@code ResourceCountUpTo} output and operands
    * @return a new instance of ResourceCountUpTo
@@ -4362,9 +4551,10 @@ public <T extends TNumber> ResourceCountUpTo<T> resourceCountUpTo(
 
   /**
    * Gather slices from the variable pointed to by {@code resource} according to {@code indices}.
-   *  {@code indices} must be an integer tensor of any dimension (usually 0-D or 1-D).
-   *  Produces an output tensor with shape {@code indices.shape + params.shape[1:]} where:
-   *  <pre>
+   * {@code indices} must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an
+   * output tensor with shape {@code indices.shape + params.shape[1:]} where:
+   *
+   * <pre>
    *      # Scalar indices
    *      output[:, ..., :] = params[indices, :, ... :]
    *
@@ -4383,8 +4573,11 @@ public <T extends TNumber> ResourceCountUpTo<T> resourceCountUpTo(Operand<? extends TType> resource,
   * @param <T> data type for {@code ResourceGather} output and operands
    * @return a new instance of ResourceGather
    */
-  public <T extends TType> ResourceGather<T> resourceGather(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Class<T> dtype, ResourceGather.Options... options) {
+  public <T extends TType> ResourceGather<T> resourceGather(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Class<T> dtype,
+      ResourceGather.Options... options) {
     return ResourceGather.create(scope, resource, indices, dtype, options);
   }
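Editor's note (usage sketch, not part of the patch): {@code resourceGather} reads from a
resource variable, so the sketch below assumes the {@code varHandleOp} and
{@code assignVariableOp} wrappers from this same API to create and initialize one.

    VarHandleOp handle = tf.varHandleOp(TFloat32.class, Shape.of(4, 2));
    tf.assignVariableOp(handle, tf.constant(new float[][] {{0, 1}, {2, 3}, {4, 5}, {6, 7}}));
    // Output shape is indices.shape + params.shape[1:] = [2, 2]; rows 3 and 1 are selected.
    ResourceGather<TFloat32> rows =
        tf.resourceGather(handle, tf.constant(new int[] {3, 1}), TFloat32.class);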
 
@@ -4398,15 +4591,15 @@ public <T extends TType> ResourceGather<T> resourceGather(Operand<? extends TType> resource,
    * @param <T> data type for {@code ResourceGatherNd} output and operands
    * @return a new instance of ResourceGatherNd
    */
-  public <T extends TType> ResourceGatherNd<T> resourceGatherNd(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Class<T> dtype) {
+  public <T extends TType> ResourceGatherNd<T> resourceGatherNd(
+      Operand<? extends TType> resource, Operand<? extends TNumber> indices, Class<T> dtype) {
     return ResourceGatherNd.create(scope, resource, indices, dtype);
   }
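Editor's note (usage sketch, not part of the patch): continuing the {@code resourceGather}
sketch above, {@code resourceGatherNd} addresses the same variable with rank-2 indices.

    // indices [[3], [1]] select whole rows 3 and 1 ==> [[6, 7], [2, 3]]
    ResourceGatherNd<TFloat32> nd =
        tf.resourceGatherNd(handle, tf.constant(new int[][] {{3}, {1}}), TFloat32.class);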
 
   /**
-   * Adds sparse updates to the variable referenced by {@code resource}.
-   *  This operation computes
-   *  <pre>
+   * Adds sparse updates to the variable referenced by {@code resource}. This operation computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] += updates[...]
    *
@@ -4416,27 +4609,31 @@ public <T extends TType> ResourceGatherNd<T> resourceGatherNd(Operand<? extends TType> resource,
    *  </pre>
-   *  <p>Duplicate entries are handled correctly: if multiple {@code indices} reference
-   *  the same location, their contributions add.
-   *  <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
+   *
+   * <p>Duplicate entries are handled correctly: if multiple {@code indices} reference the same
+   * location, their contributions add.
+   *
+   * <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape =
+   * []}.
    *
    * @param resource Should be from a {@code Variable} node.
    * @param indices A tensor of indices into the first dimension of {@code ref}.
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterAdd
    */
-  public ResourceScatterAdd resourceScatterAdd(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterAdd resourceScatterAdd(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterAdd.create(scope, resource, indices, updates);
   }
 
   /**
-   * Divides sparse updates into the variable referenced by {@code resource}.
-   *  This operation computes
-   *  <pre>
+   * Divides sparse updates into the variable referenced by {@code resource}. This operation
+   * computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] /= updates[...]
    *
@@ -4446,27 +4643,31 @@ public ResourceScatterAdd resourceScatterAdd(Operand<? extends TType> resource,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    *  
    *  </pre>
-   *  <p>Duplicate entries are handled correctly: if multiple {@code indices} reference
-   *  the same location, their contributions multiply.
-   *  <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
+   *
+   * <p>Duplicate entries are handled correctly: if multiple {@code indices} reference the same
+   * location, their contributions multiply.
+   *
+   * <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape =
+   * []}.
    *
    * @param resource Should be from a {@code Variable} node.
    * @param indices A tensor of indices into the first dimension of {@code ref}.
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterDiv
    */
-  public ResourceScatterDiv resourceScatterDiv(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterDiv resourceScatterDiv(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterDiv.create(scope, resource, indices, updates);
   }
 
   /**
-   * Reduces sparse updates into the variable referenced by {@code resource} using the {@code max} operation.
-   *  This operation computes
-   *  <pre>
+   * Reduces sparse updates into the variable referenced by {@code resource} using the {@code max}
+   * operation. This operation computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] = max(ref[indices, ...], updates[...])
    *
@@ -4476,27 +4677,31 @@ public ResourceScatterDiv resourceScatterDiv(Operand<? extends TType> resource,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    *  </pre>
-   *  <p>Duplicate entries are handled correctly: if multiple {@code indices} reference
-   *  the same location, their contributions are combined.
-   *  <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
+   *
+   * <p>Duplicate entries are handled correctly: if multiple {@code indices} reference the same
+   * location, their contributions are combined.
+   *
+   * <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape =
+   * []}.
    *
    * @param resource Should be from a {@code Variable} node.
    * @param indices A tensor of indices into the first dimension of {@code ref}.
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterMax
    */
-  public ResourceScatterMax resourceScatterMax(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterMax resourceScatterMax(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterMax.create(scope, resource, indices, updates);
   }
 
   /**
-   * Reduces sparse updates into the variable referenced by {@code resource} using the {@code min} operation.
-   *  This operation computes
-   *  <pre>
+   * Reduces sparse updates into the variable referenced by {@code resource} using the {@code min}
+   * operation. This operation computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] = min(ref[indices, ...], updates[...])
    *
@@ -4506,27 +4711,31 @@ public ResourceScatterMax resourceScatterMax(Operand<? extends TType> resource,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    *  </pre>
-   *  <p>Duplicate entries are handled correctly: if multiple {@code indices} reference
-   *  the same location, their contributions are combined.
-   *  <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
+   *
+   * <p>Duplicate entries are handled correctly: if multiple {@code indices} reference the same
+   * location, their contributions are combined.
+   *
+   * <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape =
+   * []}.
    *
    * @param resource Should be from a {@code Variable} node.
    * @param indices A tensor of indices into the first dimension of {@code ref}.
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterMin
    */
-  public ResourceScatterMin resourceScatterMin(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterMin resourceScatterMin(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterMin.create(scope, resource, indices, updates);
   }
 
   /**
-   * Multiplies sparse updates into the variable referenced by {@code resource}.
-   *  This operation computes
-   *  <pre>
+   * Multiplies sparse updates into the variable referenced by {@code resource}. This operation
+   * computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] *= updates[...]
    *
@@ -4536,38 +4745,47 @@ public ResourceScatterMin resourceScatterMin(Operand<? extends TType> resource,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    *  </pre>
-   *  <p>Duplicate entries are handled correctly: if multiple {@code indices} reference
-   *  the same location, their contributions multiply.
-   *  <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
+   *
+   * <p>Duplicate entries are handled correctly: if multiple {@code indices} reference the same
+   * location, their contributions multiply.
+   *
+   * <p>Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape =
+   * []}.
    *
    * @param resource Should be from a {@code Variable} node.
    * @param indices A tensor of indices into the first dimension of {@code ref}.
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterMul
    */
-  public ResourceScatterMul resourceScatterMul(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterMul resourceScatterMul(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterMul.create(scope, resource, indices, updates);
   }
 
   /**
-   * Applies sparse addition to individual values or slices in a Variable.
-   *  {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
-   *  <p>{@code indices} must be integer tensor, containing indices into {@code ref}.
-   *  It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}.
-   *  <p>The innermost dimension of {@code indices} (with length {@code K}) corresponds to
-   *  indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th
-   *  dimension of {@code ref}.
-   *  <p>{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape:
-   *  <pre>
+   * Applies sparse addition to individual values or slices in a Variable. {@code ref} is a {@code
+   * Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
+   *
+   * <p>{@code indices} must be integer tensor, containing indices into {@code ref}. It must be
+   * shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}.
+   *
+   * <p>The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices
+   * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension
+   * of {@code ref}.
+   *
+   * <p>{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape:
+   *
+   * <pre>
    *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
    *  </pre>
-   *  <p>For example, say we want to add 4 scattered elements to a rank-1 tensor to
-   *  8 elements. In Python, that addition would look like this:
-   *  <pre>
+   *
+   * <p>For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In
+   * Python, that addition would look like this:
+   *
+   * <pre>
    *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
    *  indices = tf.constant([[4], [3], [1], [7]])
    *  updates = tf.constant([9, 10, 11, 12])
@@ -4575,23 +4793,26 @@ public ResourceScatterMul resourceScatterMul(Operand<? extends TType> resource,
    *  with tf.Session() as sess:
    *    print sess.run(add)
    *  </pre>
-   *  <p>The resulting update to ref would look like this:
-   *  <pre>
+   *
+   * <p>The resulting update to ref would look like this:
+   *
+   * <pre>
    *  [1, 13, 3, 14, 14, 6, 7, 20]
    *  </pre>
-   *  <p>See {@code tf.scatter_nd} for more details about how to make updates to
-   *  slices.
+   *
+   * <p>See {@code tf.scatter_nd} for more details about how to make updates to slices.
    *
    * @param ref A resource handle. Must be from a VarHandleOp.
-   * @param indices A Tensor. Must be one of the following types: int32, int64.
-   *  A tensor of indices into ref.
-   * @param updates A Tensor. Must have the same type as ref. A tensor of
-   *  values to add to ref.
+   * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices
+   *     into ref.
+   * @param updates A Tensor. Must have the same type as ref. A tensor of values to add to ref.
    * @param options carries optional attribute values
    * @return a new instance of ResourceScatterNdAdd
    */
-  public ResourceScatterNdAdd resourceScatterNdAdd(Operand<? extends TType> ref,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates,
+  public ResourceScatterNdAdd resourceScatterNdAdd(
+      Operand<? extends TType> ref,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates,
       ResourceScatterNdAdd.Options... options) {
     return ResourceScatterNdAdd.create(scope, ref, indices, updates, options);
   }
 
@@ -4600,15 +4821,17 @@ public ResourceScatterNdAdd resourceScatterNdAdd(Operand<? extends TType> ref,
    * The ResourceScatterNdMax operation
    *
    * @param ref A resource handle. Must be from a VarHandleOp.
-   * @param indices A Tensor. Must be one of the following types: int32, int64.
-   *  A tensor of indices into ref.
-   * @param updates A Tensor. Must have the same type as ref. A tensor of
-   *  values whose element wise max is taken with ref
+   * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices
+   *     into ref.
+   * @param updates A Tensor. Must have the same type as ref. A tensor of values whose element wise
+   *     max is taken with ref
    * @param options carries optional attribute values
    * @return a new instance of ResourceScatterNdMax
    */
-  public ResourceScatterNdMax resourceScatterNdMax(Operand<? extends TType> ref,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates,
+  public ResourceScatterNdMax resourceScatterNdMax(
+      Operand<? extends TType> ref,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates,
       ResourceScatterNdMax.Options... options) {
     return ResourceScatterNdMax.create(scope, ref, indices, updates, options);
   }
 
@@ -4617,34 +4840,42 @@ public ResourceScatterNdMax resourceScatterNdMax(Operand<? extends TType> ref,
    * The ResourceScatterNdMin operation
    *
    * @param ref A resource handle. Must be from a VarHandleOp.
-   * @param indices A Tensor. Must be one of the following types: int32, int64.
-   *  A tensor of indices into ref.
-   * @param updates A Tensor. Must have the same type as ref. A tensor of
-   *  values whose element wise min is taken with ref.
+   * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices
+   *     into ref.
+   * @param updates A Tensor. Must have the same type as ref. A tensor of values whose element wise
+   *     min is taken with ref.
    * @param options carries optional attribute values
    * @return a new instance of ResourceScatterNdMin
    */
-  public ResourceScatterNdMin resourceScatterNdMin(Operand<? extends TType> ref,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates,
+  public ResourceScatterNdMin resourceScatterNdMin(
+      Operand<? extends TType> ref,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates,
       ResourceScatterNdMin.Options... options) {
     return ResourceScatterNdMin.create(scope, ref, indices, updates, options);
   }
 
   /**
-   * Applies sparse subtraction to individual values or slices in a Variable.
-   *  {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
-   *  <p>{@code indices} must be integer tensor, containing indices into {@code ref}.
-   *  It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}.
-   *  <p>The innermost dimension of {@code indices} (with length {@code K}) corresponds to
-   *  indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th
-   *  dimension of {@code ref}.
-   *  <p>{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape:
-   *  <pre>
+   * Applies sparse subtraction to individual values or slices in a Variable. {@code ref} is a
+   * {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
+   *
+   * <p>{@code indices} must be integer tensor, containing indices into {@code ref}. It must be
+   * shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}.
+   *
+   * <p>The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices
+   * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension
+   * of {@code ref}.
+   *
+   * <p>{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape:
+   *
+   * <pre>
    *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
    *  </pre>
-   *  <p>For example, say we want to subtract 4 scattered elements from a rank-1 tensor
-   *  with 8 elements. In Python, that subtraction would look like this:
-   *  <pre>
+   *
+   * <p>For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8
+   * elements. In Python, that subtraction would look like this:
+   *
+   * <pre>
    *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
    *  indices = tf.constant([[4], [3], [1], [7]])
    *  updates = tf.constant([9, 10, 11, 12])
@@ -4652,43 +4883,54 @@ public ResourceScatterNdMin resourceScatterNdMin(Operand<? extends TType> ref,
    *  with tf.Session() as sess:
    *    print sess.run(sub)
    *  
- *

The resulting update to ref would look like this: - *

+   *
+   * 

The resulting update to ref would look like this: + * + *

    *  [1, -9, 3, -6, -4, 6, 7, -4]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to - * slices. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of - * values to add to ref. + * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices + * into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of values to add to ref. * @param options carries optional attribute values * @return a new instance of ResourceScatterNdSub */ - public ResourceScatterNdSub resourceScatterNdSub(Operand ref, - Operand indices, Operand updates, + public ResourceScatterNdSub resourceScatterNdSub( + Operand ref, + Operand indices, + Operand updates, ResourceScatterNdSub.Options... options) { return ResourceScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Applies sparse {@code updates} to individual values or slices within a given - * variable according to {@code indices}. - *

{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. - *

{@code indices} must be integer tensor, containing indices into {@code ref}. - * It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. - *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to - * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th - * dimension of {@code ref}. - *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: - *

+   * Applies sparse {@code updates} to individual values or slices within a given variable according
+   * to {@code indices}.
+   *
+   * <p>{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor}
+   * of rank {@code Q}.
+   *
+   * <p>{@code indices} must be integer tensor, containing indices into {@code ref}. It must be
+   * shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}.
+   *
+   * <p>The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices
+   * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension
+   * of {@code ref}.
+   *
+   * <p>{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape:
+   *
+   * <pre>
    *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    *  </pre>
-   *  <p>For example, say we want to update 4 scattered elements to a rank-1 tensor to
-   *  8 elements. In Python, that update would look like this:
-   *  <pre>
+   *
+   * <p>For example, say we want to update 4 scattered elements to a rank-1 tensor to 8 elements. In
+   * Python, that update would look like this:
+   *
+   * <pre>
    *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    *      indices = tf.constant([[4], [3], [1] ,[7]])
    *      updates = tf.constant([9, 10, 11, 12])
@@ -4696,31 +4938,36 @@ public ResourceScatterNdSub resourceScatterNdSub(Operand<? extends TType> ref,
    *      with tf.Session() as sess:
    *        print sess.run(update)
    *  
- *

The resulting update to ref would look like this: - *

+   *
+   * 

The resulting update to ref would look like this: + * + *

    *  [1, 11, 3, 10, 9, 6, 7, 12]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to - * slices. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. + * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices + * into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values to add to + * ref. * @param options carries optional attribute values * @return a new instance of ResourceScatterNdUpdate */ - public ResourceScatterNdUpdate resourceScatterNdUpdate(Operand ref, - Operand indices, Operand updates, + public ResourceScatterNdUpdate resourceScatterNdUpdate( + Operand ref, + Operand indices, + Operand updates, ResourceScatterNdUpdate.Options... options) { return ResourceScatterNdUpdate.create(scope, ref, indices, updates, options); } /** - * Subtracts sparse updates from the variable referenced by {@code resource}. - * This operation computes - *

+   * Subtracts sparse updates from the variable referenced by {@code resource}. This operation
+   * computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] -= updates[...]
    *
@@ -4730,27 +4977,30 @@ public ResourceScatterNdUpdate resourceScatterNdUpdate(Operand<? extends TType>
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
    *  
- *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions add. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
+ * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions add. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

* * @param resource Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to add to {@code ref}. * @return a new instance of ResourceScatterSub */ - public ResourceScatterSub resourceScatterSub(Operand resource, - Operand indices, Operand updates) { + public ResourceScatterSub resourceScatterSub( + Operand resource, + Operand indices, + Operand updates) { return ResourceScatterSub.create(scope, resource, indices, updates); } /** - * Assigns sparse updates to the variable referenced by {@code resource}. - * This operation computes - *
+   * Assigns sparse updates to the variable referenced by {@code resource}. This operation computes
+   *
+   * <pre>
    *  # Scalar indices
    *  ref[indices, ...] = updates[...]
    *
@@ -4766,18 +5016,21 @@ public ResourceScatterSub resourceScatterSub(Operand<? extends TType> resource,
    * @param updates A tensor of updated values to add to {@code ref}.
    * @return a new instance of ResourceScatterUpdate
    */
-  public ResourceScatterUpdate resourceScatterUpdate(Operand<? extends TType> resource,
-      Operand<? extends TNumber> indices, Operand<? extends TType> updates) {
+  public ResourceScatterUpdate resourceScatterUpdate(
+      Operand<? extends TType> resource,
+      Operand<? extends TNumber> indices,
+      Operand<? extends TType> updates) {
     return ResourceScatterUpdate.create(scope, resource, indices, updates);
   }
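Editor's note (usage sketch, not part of the patch): reusing the variable handle pattern from
the {@code resourceGather} sketch above, a scatter assignment of rows 0 and 2 would look like
this.

    Operand<TInt32> indices = tf.constant(new int[] {0, 2});
    Operand<TFloat32> updates = tf.constant(new float[][] {{9, 9}, {8, 8}});
    // ref[0, :] = [9, 9] and ref[2, :] = [8, 8]; all other rows are untouched.
    tf.resourceScatterUpdate(handle, indices, updates);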
 
   /**
-   * Assign {@code value} to the sliced l-value reference of {@code ref}.
-   *  The values of {@code value} are assigned to the positions in the variable
-   *  {@code ref} that are selected by the slice parameters. The slice parameters
-   *  {@code begin, }end{@code , }strides{@code , etc. work exactly as in }StridedSlice`.
-   *  <p>NOTE this op currently does not support broadcasting and so {@code value}'s
-   *  shape must be exactly the shape produced by the slice of {@code ref}.
+   * Assign {@code value} to the sliced l-value reference of {@code ref}. The values of {@code
+   * value} are assigned to the positions in the variable {@code ref} that are selected by the slice
+   * parameters. The slice parameters {@code begin, }end{@code , }strides{@code , etc. work exactly
+   * as in }StridedSlice`.
+   *
+   * <p>NOTE this op currently does not support broadcasting and so {@code value}'s shape must be
+   * exactly the shape produced by the slice of {@code ref}.
    *
    * @param ref the ref value
    * @param begin the begin value
@@ -4789,23 +5042,31 @@ public ResourceScatterUpdate resourceScatterUpdate(Operand<? extends TType> reso
    * @return a new instance of ResourceStridedSliceAssign
    */
   public <T extends TNumber> ResourceStridedSliceAssign resourceStridedSliceAssign(
-      Operand<? extends TType> ref, Operand<T> begin, Operand<T> end, Operand<T> strides,
-      Operand<? extends TType> value, ResourceStridedSliceAssign.Options... options) {
+      Operand<? extends TType> ref,
+      Operand<T> begin,
+      Operand<T> end,
+      Operand<T> strides,
+      Operand<? extends TType> value,
+      ResourceStridedSliceAssign.Options... options) {
     return ResourceStridedSliceAssign.create(scope, ref, begin, end, strides, value, options);
   }
 
   /**
-   * Reverses specific dimensions of a tensor.
-   *  NOTE {@code tf.reverse} has now changed behavior in preparation for 1.0.
-   *  {@code tf.reverse_v2} is currently an alias that will be deprecated before TF 1.0.
-   *  <p>Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of
-   *  dimensions of {@code tensor} to reverse. This operation reverses each dimension
-   *  {@code i} for which there exists {@code j} s.t. {@code axis[j] == i}.
-   *  <p>{@code tensor} can have up to 8 dimensions. The number of dimensions specified
-   *  in {@code axis} may be 0 or more entries. If an index is specified more than
-   *  once, a InvalidArgument error is raised.
-   *  <p>For example:
-   *  <pre>
+   * Reverses specific dimensions of a tensor. NOTE {@code tf.reverse} has now changed behavior in
+   * preparation for 1.0. {@code tf.reverse_v2} is currently an alias that will be deprecated before
+   * TF 1.0.
+   *
+   * <p>Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of
+   * dimensions of {@code tensor} to reverse. This operation reverses each dimension {@code i} for
+   * which there exists {@code j} s.t. {@code axis[j] == i}.
+   *
+   * <p>{@code tensor} can have up to 8 dimensions. The number of dimensions specified in {@code
+   * axis} may be 0 or more entries. If an index is specified more than once, an InvalidArgument
+   * error is raised.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # tensor 't' is [[[[ 0,  1,  2,  3],
    *  #                  [ 4,  5,  6,  7],
    *  #                  [ 8,  9, 10, 11]],
@@ -4841,8 +5102,8 @@ public <T extends TNumber> ResourceStridedSliceAssign resourceStridedSliceAssign
    *
   * @param <T> data type for {@code output} output
    * @param tensor Up to 8-D.
-   * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range
-   *  {@code [-rank(tensor), rank(tensor))}.
+   * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range {@code
+   *     [-rank(tensor), rank(tensor))}.
   * @param <T> data type for {@code ReverseV2} output and operands
    * @return a new instance of Reverse
    */
@@ -4851,17 +5112,20 @@ public <T extends TType> Reverse<T> reverse(Operand<T> tensor, Operand<? extends TNumber> axis) {
-   * Reverses variable length slices.
-   *  This op first slices {@code input} along the dimension {@code batch_dim}, and
-   *  for each slice {@code i}, reverses the first {@code seq_lengths[i]} elements
-   *  along the dimension {@code seq_dim}.
-   *  <p>The elements of {@code seq_lengths} must obey {@code seq_lengths[i] <= input.dims[seq_dim]},
-   *  and {@code seq_lengths} must be a vector of length {@code input.dims[batch_dim]}.
-   *  <p>The output slice {@code i} along dimension {@code batch_dim} is then given by input
-   *  slice {@code i}, with the first {@code seq_lengths[i]} slices along dimension
-   *  {@code seq_dim} reversed.
-   *  <p>For example:
-   *  <pre>
+   * Reverses variable length slices. This op first slices {@code input} along the dimension {@code
+   * batch_dim}, and for each slice {@code i}, reverses the first {@code seq_lengths[i]} elements
+   * along the dimension {@code seq_dim}.
+   *
+   * 

The elements of {@code seq_lengths} must obey {@code seq_lengths[i] <= input.dims[seq_dim]}, + * and {@code seq_lengths} must be a vector of length {@code input.dims[batch_dim]}. + * + *

The output slice {@code i} along dimension {@code batch_dim} is then given by input slice + * {@code i}, with the first {@code seq_lengths[i]} slices along dimension {@code seq_dim} + * reversed. + * + *

For example: + * + *

    *  # Given this:
    *  batch_dim = 0
    *  seq_dim = 1
@@ -4880,8 +5144,10 @@ public <T extends TType> Reverse<T> reverse(Operand<T> tensor, Operand<? extends TNumber> axis) {
-   *  <p>In contrast, if:
-   *  <pre>
+   *
+   * <p>In contrast, if:
+   *
+   * <pre>
    *  # Given this:
    *  batch_dim = 2
    *  seq_dim = 0
@@ -4903,27 +5169,31 @@ public <T extends TType> Reverse<T> reverse(Operand<T> tensor, Operand<? extends TNumber> axis) {
    * @param <T> data type for {@code output} output
    * @param input The input to reverse.
-   * @param seqLengths 1-D with length {@code input.dims(batch_dim)} and
-   *  {@code max(seq_lengths) <= input.dims(seq_dim)}
+   * @param seqLengths 1-D with length {@code input.dims(batch_dim)} and {@code max(seq_lengths) <=
+   *     input.dims(seq_dim)}
    * @param seqDim The dimension which is partially reversed.
    * @param options carries optional attribute values
   * @param <T> data type for {@code ReverseSequence} output and operands
    * @return a new instance of ReverseSequence
    */
-  public <T extends TType> ReverseSequence<T> reverseSequence(Operand<T> input,
-      Operand<? extends TNumber> seqLengths, Long seqDim, ReverseSequence.Options... options) {
+  public <T extends TType> ReverseSequence<T> reverseSequence(
+      Operand<T> input,
+      Operand<? extends TNumber> seqLengths,
+      Long seqDim,
+      ReverseSequence.Options... options) {
     return ReverseSequence.create(scope, input, seqLengths, seqDim, options);
   }
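Editor's note (usage sketch, not part of the patch): with {@code tf} as in the earlier
sketches, {@code reverseSequence} reverses the first {@code seq_lengths[i]} elements of each
batch row ({@code seqDim = 1}, default {@code batch_dim = 0}).

    Operand<TInt32> x = tf.constant(new int[][] {{1, 2, 3, 0}, {4, 5, 0, 0}});
    // Row 0 reverses its first 3 elements, row 1 its first 2:
    // ==> [[3, 2, 1, 0], [5, 4, 0, 0]]
    ReverseSequence<TInt32> rev =
        tf.reverseSequence(x, tf.constant(new long[] {3L, 2L}), 1L);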
 
   /**
-   * Rolls the elements of a tensor along an axis.
-   *  The elements are shifted positively (towards larger indices) by the offset of
-   *  {@code shift} along the dimension of {@code axis}. Negative {@code shift} values will shift
-   *  elements in the opposite direction. Elements that roll passed the last position
-   *  will wrap around to the first and vice versa. Multiple shifts along multiple
-   *  axes may be specified.
-   *  <p>For example:
-   *  <pre>
+   * Rolls the elements of a tensor along an axis. The elements are shifted positively (towards
+   * larger indices) by the offset of {@code shift} along the dimension of {@code axis}. Negative
+   * {@code shift} values will shift elements in the opposite direction. Elements that roll passed
+   * the last position will wrap around to the first and vice versa. Multiple shifts along multiple
+   * axes may be specified.
+   *
+   * 

For example: + * + *

    *  # 't' is [0, 1, 2, 3, 4]
    *  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
    *
@@ -4938,26 +5208,25 @@ public  ReverseSequence reverseSequence(Operand input,
    *
    * @param  data type for {@code output} output
    * @param input the input value
-   * @param shift Dimension must be 0-D or 1-D. {@code shift[i]} specifies the number of places by which
-   *  elements are shifted positively (towards larger indices) along the dimension
-   *  specified by {@code axis[i]}. Negative shifts will roll the elements in the opposite
-   *  direction.
-   * @param axis Dimension must be 0-D or 1-D. {@code axis[i]} specifies the dimension that the shift
-   *  {@code shift[i]} should occur. If the same axis is referenced more than once, the
-   *  total shift for that axis will be the sum of all the shifts that belong to that
-   *  axis.
+   * @param shift Dimension must be 0-D or 1-D. {@code shift[i]} specifies the number of places by
+   *     which elements are shifted positively (towards larger indices) along the dimension
+   *     specified by {@code axis[i]}. Negative shifts will roll the elements in the opposite
+   *     direction.
+   * @param axis Dimension must be 0-D or 1-D. {@code axis[i]} specifies the dimension that the
+   *     shift {@code shift[i]} should occur. If the same axis is referenced more than once, the
+   *     total shift for that axis will be the sum of all the shifts that belong to that axis.
    * @param  data type for {@code Roll} output and operands
    * @return a new instance of Roll
    */
-  public  Roll roll(Operand input, Operand shift,
-      Operand axis) {
+  public  Roll roll(
+      Operand input, Operand shift, Operand axis) {
     return Roll.create(scope, input, shift, axis);
   }
 
   /**
-   * Adds sparse updates to a variable reference.
-   *  This operation computes
-   *  
+   * Adds sparse updates to a variable reference. This operation computes
+   *
+   * 
    *  # Scalar indices
    *  ref[indices, ...] += updates[...]
    *
@@ -4967,14 +5236,16 @@ public  Roll roll(Operand input, Operand
-   *  

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions add. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
+ * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions add. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

* * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -4984,15 +5255,18 @@ public Roll roll(Operand input, Operand data type for {@code ScatterAdd} output and operands * @return a new instance of ScatterAdd */ - public ScatterAdd scatterAdd(Operand ref, - Operand indices, Operand updates, ScatterAdd.Options... options) { + public ScatterAdd scatterAdd( + Operand ref, + Operand indices, + Operand updates, + ScatterAdd.Options... options) { return ScatterAdd.create(scope, ref, indices, updates, options); } /** - * Divides a variable reference by sparse updates. - * This operation computes - *
+   * Divides a variable reference by sparse updates. This operation computes
+   *
+   * 
    *      # Scalar indices
    *      ref[indices, ...] /= updates[...]
    *
@@ -5002,11 +5276,15 @@ public  ScatterAdd scatterAdd(Operand ref,
    *      # High rank indices (for each i, ..., j)
    *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    *  
- *

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions divide. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. + * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions divide. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5016,15 +5294,19 @@ public ScatterAdd scatterAdd(Operand ref, * @param data type for {@code ScatterDiv} output and operands * @return a new instance of ScatterDiv */ - public ScatterDiv scatterDiv(Operand ref, - Operand indices, Operand updates, ScatterDiv.Options... options) { + public ScatterDiv scatterDiv( + Operand ref, + Operand indices, + Operand updates, + ScatterDiv.Options... options) { return ScatterDiv.create(scope, ref, indices, updates, options); } /** - * Reduces sparse updates into a variable reference using the {@code max} operation. - * This operation computes - *

+   * Reduces sparse updates into a variable reference using the {@code max} operation. This
+   * operation computes
+   *
+   * 
    *  # Scalar indices
    *  ref[indices, ...] = max(ref[indices, ...], updates[...])
    *
@@ -5034,14 +5316,16 @@ public  ScatterDiv scatterDiv(Operand ref,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    *  
- *

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions combine. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
+ * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions combine. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

* * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5051,15 +5335,19 @@ public ScatterDiv scatterDiv(Operand ref, * @param data type for {@code ScatterMax} output and operands * @return a new instance of ScatterMax */ - public ScatterMax scatterMax(Operand ref, - Operand indices, Operand updates, ScatterMax.Options... options) { + public ScatterMax scatterMax( + Operand ref, + Operand indices, + Operand updates, + ScatterMax.Options... options) { return ScatterMax.create(scope, ref, indices, updates, options); } /** - * Reduces sparse updates into a variable reference using the {@code min} operation. - * This operation computes - *
+   * Reduces sparse updates into a variable reference using the {@code min} operation. This
+   * operation computes
+   *
+   * 
    *  # Scalar indices
    *  ref[indices, ...] = min(ref[indices, ...], updates[...])
    *
@@ -5069,14 +5357,16 @@ public  ScatterMax scatterMax(Operand ref,
    *  # High rank indices (for each i, ..., j)
    *  ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    *  
- *

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions combine. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
+ * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions combine. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

* * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5086,15 +5376,18 @@ public ScatterMax scatterMax(Operand ref, * @param data type for {@code ScatterMin} output and operands * @return a new instance of ScatterMin */ - public ScatterMin scatterMin(Operand ref, - Operand indices, Operand updates, ScatterMin.Options... options) { + public ScatterMin scatterMin( + Operand ref, + Operand indices, + Operand updates, + ScatterMin.Options... options) { return ScatterMin.create(scope, ref, indices, updates, options); } /** - * Multiplies sparse updates into a variable reference. - * This operation computes - *
+   * Multiplies sparse updates into a variable reference. This operation computes
+   *
+   * 
    *      # Scalar indices
    *      ref[indices, ...] *= updates[...]
    *
@@ -5104,11 +5397,15 @@ public  ScatterMin scatterMin(Operand ref,
    *      # High rank indices (for each i, ..., j)
    *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    *  
- *

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their contributions multiply. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. + * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their contributions multiply. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5118,63 +5415,75 @@ public ScatterMin scatterMin(Operand ref, * @param data type for {@code ScatterMul} output and operands * @return a new instance of ScatterMul */ - public ScatterMul scatterMul(Operand ref, - Operand indices, Operand updates, ScatterMul.Options... options) { + public ScatterMul scatterMul( + Operand ref, + Operand indices, + Operand updates, + ScatterMul.Options... options) { return ScatterMul.create(scope, ref, indices, updates, options); } /** - * Scatter {@code updates} into a new tensor according to {@code indices}. - * Creates a new tensor by applying sparse {@code updates} to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given {@code shape} according to indices. This operator is the inverse of the - * {@code tf.gather_nd} operator which extracts values or slices from a given tensor. - *

This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling {@code tf.scatter_nd(indices, values, shape)} is identical - * to {@code tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)} - *

If {@code indices} contains duplicates, then their updates are accumulated (summed). - *

WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if {@code indices} contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

{@code indices} is an integer tensor containing indices into a new tensor of shape - * {@code shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}: - *

+   * Scatter {@code updates} into a new tensor according to {@code indices}. Creates a new tensor by
+   * applying sparse {@code updates} to individual values or slices within a tensor (initially zero
+   * for numeric, empty for string) of the given {@code shape} according to indices. This operator
+   * is the inverse of the {@code tf.gather_nd} operator which extracts values or slices from a
+   * given tensor.
+   *
+   * 

This operation is similar to tensor_scatter_add, except that the tensor is zero-initialized. + * Calling {@code tf.scatter_nd(indices, values, shape)} is identical to {@code + * tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)} + * + *

If {@code indices} contains duplicates, then their updates are accumulated (summed). + * + *

WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if {@code indices} contains duplicates -- because of some + * numerical approximation issues, numbers summed in different order may yield different results. + * + *

{@code indices} is an integer tensor containing indices into a new tensor of shape {@code + * shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}: + * + *

    *  indices.shape[-1] <= shape.rank
    *  
- *

The last dimension of {@code indices} corresponds to indices into elements - * (if {@code indices.shape[-1] = shape.rank}) or slices - * (if {@code indices.shape[-1] < shape.rank}) along dimension {@code indices.shape[-1]} of - * {@code shape}. {@code updates} is a tensor with shape - *

+   *
+   * 

The last dimension of {@code indices} corresponds to indices into elements (if {@code + * indices.shape[-1] = shape.rank}) or slices (if {@code indices.shape[-1] < shape.rank}) along + * dimension {@code indices.shape[-1]} of {@code shape}. {@code updates} is a tensor with shape + * + *

    *  indices.shape[:-1] + shape[indices.shape[-1]:]
    *  
- *

The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. - *

- * - *
- *

In Python, this scatter operation would look like this: - *

+   *
+   * 

The simplest form of scatter is to insert individual elements in a tensor by index. For + * example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.

+ * + *

In Python, this scatter operation would look like this: + * + *

    *      indices = tf.constant([[4], [3], [1], [7]])
    *      updates = tf.constant([9, 10, 11, 12])
    *      shape = tf.constant([8])
    *      scatter = tf.scatter_nd(indices, updates, shape)
    *      print(scatter)
    *  
- *

The resulting tensor would look like this: - *

+   *
+   * 

The resulting tensor would look like this: + * + *

    *  [0, 11, 0, 10, 9, 0, 0, 12]
    *  
- *

We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

- * - *
- *

In Python, this scatter operation would look like this: - *

+   *
+   * 

We can also, insert entire slices of a higher rank tensor all at once. For example, if we + * wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new + * values.

+ * + *

In Python, this scatter operation would look like this: + * + *

    *      indices = tf.constant([[0], [2]])
    *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    *                              [7, 7, 7, 7], [8, 8, 8, 8]],
@@ -5184,15 +5493,18 @@ public  ScatterMul scatterMul(Operand ref,
    *      scatter = tf.scatter_nd(indices, updates, shape)
    *      print(scatter)
    *  
- *

The resulting tensor would look like this: - *

+   *
+   * 

The resulting tensor would look like this: + * + *

    *  [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
    *   [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
    *   [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
    *   [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
    *  
- *

Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * + *

Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out + * of bound index is found, the index is ignored. * * @param data type for {@code output} output * @param indices Index tensor. @@ -5202,26 +5514,32 @@ public ScatterMul scatterMul(Operand ref, * @param data type for {@code ScatterNd} output and operands * @return a new instance of ScatterNd */ - public ScatterNd scatterNd(Operand indices, - Operand updates, Operand shape) { + public ScatterNd scatterNd( + Operand indices, Operand updates, Operand shape) { return ScatterNd.create(scope, indices, updates, shape); } /** - * Applies sparse addition to individual values or slices in a Variable. - * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. - *

{@code indices} must be integer tensor, containing indices into {@code ref}. - * It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. - *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to - * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th - * dimension of {@code ref}. - *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: - *

+   * Applies sparse addition to individual values or slices in a Variable. {@code ref} is a {@code
+   * Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
+   *
+   * 

{@code indices} must be integer tensor, containing indices into {@code ref}. It must be + * shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. + * + *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices + * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension + * of {@code ref}. + * + *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: + * + *

    *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
    *  
- *

For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: - *

+   *
+   * 

For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In + * Python, that addition would look like this: + * + *

    *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    *  indices = tf.constant([[4], [3], [1], [7]])
    *  updates = tf.constant([9, 10, 11, 12])
@@ -5229,45 +5547,57 @@ public  ScatterNd scatterNd(Operand in
    *  with tf.Session() as sess:
    *    print sess.run(add)
    *  
- *

The resulting update to ref would look like this: - *

+   *
+   * 

The resulting update to ref would look like this: + * + *

    *  [1, 13, 3, 14, 14, 6, 7, 20]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to - * slices. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to ref. + * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices + * into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values to add to + * ref. * @param options carries optional attribute values * @param data type for {@code ScatterNdAdd} output and operands * @return a new instance of ScatterNdAdd */ - public ScatterNdAdd scatterNdAdd(Operand ref, - Operand indices, Operand updates, ScatterNdAdd.Options... options) { + public ScatterNdAdd scatterNdAdd( + Operand ref, + Operand indices, + Operand updates, + ScatterNdAdd.Options... options) { return ScatterNdAdd.create(scope, ref, indices, updates, options); } /** - * Applies sparse addition to {@code input} using individual values or slices - * from {@code updates} according to indices {@code indices}. The updates are non-aliasing: - * {@code input} is only modified in-place if no other operations will use it. - * Otherwise, a copy of {@code input} is made. This operation has a gradient with - * respect to both {@code input} and {@code updates}. - *

{@code input} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. - *

{@code indices} must be integer tensor, containing indices into {@code input}. - * It must be shape \([d_0, ..., d_{Q-2}, K]\) where {@code 0 < K <= P}. - *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to - * indices into elements (if {@code K = P}) or {@code (P-K)}-dimensional slices - * (if {@code K < P}) along the {@code K}th dimension of {@code input}. - *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: - *

$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - *

For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 - * elements. In Python, that addition would look like this: - *

+   * Applies sparse addition to {@code input} using individual values or slices from {@code updates}
+   * according to indices {@code indices}. The updates are non-aliasing: {@code input} is only
+   * modified in-place if no other operations will use it. Otherwise, a copy of {@code input} is
+   * made. This operation has a gradient with respect to both {@code input} and {@code updates}.
+   *
+   * 

{@code input} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code + * Tensor} of rank {@code Q}. + * + *

{@code indices} must be integer tensor, containing indices into {@code input}. It must be + * shape \([d_0, ..., d_{Q-2}, K]\) where {@code 0 < K <= P}. + * + *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices + * into elements (if {@code K = P}) or {@code (P-K)}-dimensional slices (if {@code K < P}) along + * the {@code K}th dimension of {@code input}. + * + *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: + * + *

$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + * + *

For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In + * Python, that addition would look like this: + * + *

    *  input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
    *  indices = tf.constant([[4], [3], [1], [7]])
    *  updates = tf.constant([9, 10, 11, 12])
@@ -5275,42 +5605,53 @@ public  ScatterNdAdd scatterNdAdd(Operand ref,
    *  with tf.Session() as sess:
    *    print(sess.run(output))
    *  
- *

The resulting value {@code output} would look like this: - *

+   *
+   * 

The resulting value {@code output} would look like this: + * + *

    *  [1, 13, 3, 14, 14, 6, 7, 20]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to slices. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * * @param data type for {@code output} output * @param input A Tensor. - * @param indices A Tensor. Must be one of the following types: {@code int32}, {@code int64}. - * A tensor of indices into {@code input}. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to {@code input}. + * @param indices A Tensor. Must be one of the following types: {@code int32}, {@code int64}. A + * tensor of indices into {@code input}. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values to add to + * {@code input}. * @param data type for {@code ScatterNdNonAliasingAdd} output and operands * @return a new instance of ScatterNdNonAliasingAdd */ - public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd(Operand input, - Operand indices, Operand updates) { + public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd( + Operand input, Operand indices, Operand updates) { return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); } /** - * Applies sparse subtraction to individual values or slices in a Variable. - * within a given variable according to {@code indices}. - *

{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. - *

{@code indices} must be integer tensor, containing indices into {@code ref}. - * It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. - *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to - * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th - * dimension of {@code ref}. - *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: - *

+   * Applies sparse subtraction to individual values or slices in a Variable. within a given
+   * variable according to {@code indices}.
+   *
+   * 

{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} + * of rank {@code Q}. + * + *

{@code indices} must be integer tensor, containing indices into {@code ref}. It must be + * shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. + * + *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices + * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension + * of {@code ref}. + * + *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: + * + *

    *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
    *  
- *

For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: - *

+   *
+   * 

For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 + * elements. In Python, that subtraction would look like this: + * + *

    *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    *  indices = tf.constant([[4], [3], [1], [7]])
    *  updates = tf.constant([9, 10, 11, 12])
@@ -5318,42 +5659,55 @@ public  ScatterNdNonAliasingAdd scatterNdNonAliasingAdd(Oper
    *  with tf.Session() as sess:
    *    print sess.run(sub)
    *  
- *

The resulting update to ref would look like this: - *

+   *
+   * 

The resulting update to ref would look like this: + * + *

    *  [1, -9, 3, -6, -4, 6, 7, -4]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to - * slices. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. * * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to subtract from ref. + * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices + * into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values to subtract + * from ref. * @param options carries optional attribute values * @param data type for {@code ScatterNdSub} output and operands * @return a new instance of ScatterNdSub */ - public ScatterNdSub scatterNdSub(Operand ref, - Operand indices, Operand updates, ScatterNdSub.Options... options) { + public ScatterNdSub scatterNdSub( + Operand ref, + Operand indices, + Operand updates, + ScatterNdSub.Options... options) { return ScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Applies sparse {@code updates} to individual values or slices within a given - * variable according to {@code indices}. - *

{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. - *

{@code indices} must be integer tensor, containing indices into {@code ref}. - * It must be shape \([d_0, ..., d_{Q-2}, K]\) where {@code 0 < K <= P}. - *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to - * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th - * dimension of {@code ref}. - *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: - *

$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - *

For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: - *

+   * Applies sparse {@code updates} to individual values or slices within a given variable according
+   * to {@code indices}.
+   *
+   * 

{@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} + * of rank {@code Q}. + * + *

{@code indices} must be integer tensor, containing indices into {@code ref}. It must be + * shape \([d_0, ..., d_{Q-2}, K]\) where {@code 0 < K <= P}. + * + *

The innermost dimension of {@code indices} (with length {@code K}) corresponds to indices + * into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th dimension + * of {@code ref}. + * + *

{@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: + * + *

$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ + * + *

For example, say we want to update 4 scattered elements to a rank-1 tensor to 8 elements. In + * Python, that update would look like this: + * + *

    *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    *      indices = tf.constant([[4], [3], [1] ,[7]])
    *      updates = tf.constant([9, 10, 11, 12])
@@ -5361,32 +5715,39 @@ public  ScatterNdSub scatterNdSub(Operand ref,
    *      with tf.Session() as sess:
    *        print sess.run(update)
    *  
- *

The resulting update to ref would look like this: - *

+   *
+   * 

The resulting update to ref would look like this: + * + *

    *  [1, 11, 3, 10, 9, 6, 7, 12]
    *  
- *

See {@code tf.scatter_nd} for more details about how to make updates to - * slices. - *

See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. + * + *

See {@code tf.scatter_nd} for more details about how to make updates to slices. + * + *

See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. * * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. + * @param indices A Tensor. Must be one of the following types: int32, int64. A tensor of indices + * into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values to add to + * ref. * @param options carries optional attribute values * @param data type for {@code ScatterNdUpdate} output and operands * @return a new instance of ScatterNdUpdate */ - public ScatterNdUpdate scatterNdUpdate(Operand ref, - Operand indices, Operand updates, ScatterNdUpdate.Options... options) { + public ScatterNdUpdate scatterNdUpdate( + Operand ref, + Operand indices, + Operand updates, + ScatterNdUpdate.Options... options) { return ScatterNdUpdate.create(scope, ref, indices, updates, options); } /** * Subtracts sparse updates to a variable reference. - *

+   *
+   * 
    *      # Scalar indices
    *      ref[indices, ...] -= updates[...]
    *
@@ -5396,14 +5757,16 @@ public  ScatterNdUpdate scatterNdUpdate(Operand ref,
    *      # High rank indices (for each i, ..., j)
    *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
    *  
- * This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

Duplicate entries are handled correctly: if multiple {@code indices} reference - * the same location, their (negated) contributions add. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
+ * + * This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

Duplicate entries are handled correctly: if multiple {@code indices} reference the same + * location, their (negated) contributions add. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

* * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5413,15 +5776,18 @@ public ScatterNdUpdate scatterNdUpdate(Operand ref, * @param data type for {@code ScatterSub} output and operands * @return a new instance of ScatterSub */ - public ScatterSub scatterSub(Operand ref, - Operand indices, Operand updates, ScatterSub.Options... options) { + public ScatterSub scatterSub( + Operand ref, + Operand indices, + Operand updates, + ScatterSub.Options... options) { return ScatterSub.create(scope, ref, indices, updates, options); } /** - * Applies sparse updates to a variable reference. - * This operation computes - *
+   * Applies sparse updates to a variable reference. This operation computes
+   *
+   * 
    *      # Scalar indices
    *      ref[indices, ...] = updates[...]
    *
@@ -5431,16 +5797,18 @@ public  ScatterSub scatterSub(Operand ref,
    *      # High rank indices (for each i, ..., j)
    *      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
    *  
- *

This operation outputs {@code ref} after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

If values in {@code ref} is to be updated more than once, because there are - * duplicate entries in {@code indices}, the order at which the updates happen - * for each value is undefined. - *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - *

- * - *
- *

See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. + * + *

This operation outputs {@code ref} after the update is done. This makes it easier to chain + * operations that need to use the reset value. + * + *

If values in {@code ref} is to be updated more than once, because there are duplicate + * entries in {@code indices}, the order at which the updates happen for each value is undefined. + * + *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = + * []}.

+ * + *

See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. * * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. @@ -5450,8 +5818,11 @@ public ScatterSub scatterSub(Operand ref, * @param data type for {@code ScatterUpdate} output and operands * @return a new instance of ScatterUpdate */ - public ScatterUpdate scatterUpdate(Operand ref, - Operand indices, Operand updates, ScatterUpdate.Options... options) { + public ScatterUpdate scatterUpdate( + Operand ref, + Operand indices, + Operand updates, + ScatterUpdate.Options... options) { return ScatterUpdate.create(scope, ref, indices, updates, options); } @@ -5470,20 +5841,25 @@ public Select select(Operand condition, Operand t } /** - * Computes the difference between two lists of numbers or strings. - * Given a list {@code x} and a list {@code y}, this operation returns a list {@code out} that - * represents all values that are in {@code x} but not in {@code y}. The returned list {@code out} - * is sorted in the same order that the numbers appear in {@code x} (duplicates are - * preserved). This operation also returns a list {@code idx} that represents the - * position of each {@code out} element in {@code x}. In other words: - *

{@code out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]} - *

For example, given this input: - *

+   * Computes the difference between two lists of numbers or strings. Given a list {@code x} and a
+   * list {@code y}, this operation returns a list {@code out} that represents all values that are
+   * in {@code x} but not in {@code y}. The returned list {@code out} is sorted in the same order
+   * that the numbers appear in {@code x} (duplicates are preserved). This operation also returns a
+   * list {@code idx} that represents the position of each {@code out} element in {@code x}. In
+   * other words:
+   *
+   * 

{@code out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]} + * + *

For example, given this input: + * + *

    *  x = [1, 2, 3, 4, 5, 6]
    *  y = [1, 3, 5]
    *  
- *

This operation would return: - *

+   *
+   * 

This operation would return: + * + *

    *  out ==> [2, 4, 6]
    *  idx ==> [1, 3, 5]
    *  
@@ -5500,20 +5876,25 @@ public SetDiff1d setDiff1d(Operand x, Operand } /** - * Computes the difference between two lists of numbers or strings. - * Given a list {@code x} and a list {@code y}, this operation returns a list {@code out} that - * represents all values that are in {@code x} but not in {@code y}. The returned list {@code out} - * is sorted in the same order that the numbers appear in {@code x} (duplicates are - * preserved). This operation also returns a list {@code idx} that represents the - * position of each {@code out} element in {@code x}. In other words: - *

{@code out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]} - *

For example, given this input: - *

+   * Computes the difference between two lists of numbers or strings. Given a list {@code x} and a
+   * list {@code y}, this operation returns a list {@code out} that represents all values that are
+   * in {@code x} but not in {@code y}. The returned list {@code out} is sorted in the same order
+   * that the numbers appear in {@code x} (duplicates are preserved). This operation also returns a
+   * list {@code idx} that represents the position of each {@code out} element in {@code x}. In
+   * other words:
+   *
+   * 

{@code out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]} + * + *

For example, given this input: + * + *

    *  x = [1, 2, 3, 4, 5, 6]
    *  y = [1, 3, 5]
    *  
- *

This operation would return: - *

+   *
+   * 

This operation would return: + * + *

    *  out ==> [2, 4, 6]
    *  idx ==> [1, 3, 5]
    *  
@@ -5527,18 +5908,18 @@ public SetDiff1d setDiff1d(Operand x, Operand * @param data type for {@code ListDiff} output and operands * @return a new instance of SetDiff1d */ - public SetDiff1d setDiff1d(Operand x, Operand y, - Class outIdx) { + public SetDiff1d setDiff1d( + Operand x, Operand y, Class outIdx) { return SetDiff1d.create(scope, x, y, outIdx); } /** - * Number of unique elements along last dimension of input {@code set}. - * Input {@code set} is a {@code SparseTensor} represented by {@code set_indices}, {@code set_values}, - * and {@code set_shape}. The last dimension contains values in a set, duplicates are - * allowed but ignored. - *

If {@code validate_indices} is {@code True}, this op validates the order and range of {@code set} - * indices. + * Number of unique elements along last dimension of input {@code set}. Input {@code set} is a + * {@code SparseTensor} represented by {@code set_indices}, {@code set_values}, and {@code + * set_shape}. The last dimension contains values in a set, duplicates are allowed but ignored. + * + *

If {@code validate_indices} is {@code True}, this op validates the order and range of {@code + * set} indices. * * @param setIndices 2D {@code Tensor}, indices of a {@code SparseTensor}. * @param setValues 1D {@code Tensor}, values of a {@code SparseTensor}. @@ -5546,16 +5927,21 @@ public SetDiff1d setDiff1d(Operand * @param options carries optional attribute values * @return a new instance of SetSize */ - public SetSize setSize(Operand setIndices, Operand setValues, - Operand setShape, SetSize.Options... options) { + public SetSize setSize( + Operand setIndices, + Operand setValues, + Operand setShape, + SetSize.Options... options) { return SetSize.create(scope, setIndices, setValues, setShape, options); } /** - * Returns the shape of a tensor. - * This operation returns a 1-D integer tensor representing the shape of {@code input}. - *

For example: - *

+   * Returns the shape of a tensor. This operation returns a 1-D integer tensor representing the
+   * shape of {@code input}.
+   *
+   * 

For example: + * + *

    *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    *  shape(t) ==> [2, 2, 3]
    *  
@@ -5569,10 +5955,12 @@ public org.tensorflow.op.core.Shape shape(Operand input } /** - * Returns the shape of a tensor. - * This operation returns a 1-D integer tensor representing the shape of {@code input}. - *

For example: - *

+   * Returns the shape of a tensor. This operation returns a 1-D integer tensor representing the
+   * shape of {@code input}.
+   *
+   * 

For example: + * + *

    *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    *  shape(t) ==> [2, 2, 3]
    *  
@@ -5583,14 +5971,14 @@ public org.tensorflow.op.core.Shape shape(Operand input * @param data type for {@code Shape} output and operands * @return a new instance of Shape */ - public org.tensorflow.op.core.Shape shape(Operand input, - Class outType) { + public org.tensorflow.op.core.Shape shape( + Operand input, Class outType) { return org.tensorflow.op.core.Shape.create(scope, input, outType); } /** - * Returns shape of tensors. - * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. + * Returns shape of tensors. This operation returns N 1-D integer tensors representing shape of + * {@code input[i]s}. * * @param data type for {@code output} output * @param input the input value @@ -5601,8 +5989,8 @@ public ShapeN shapeN(Iterable> input) { } /** - * Returns shape of tensors. - * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. + * Returns shape of tensors. This operation returns N 1-D integer tensors representing shape of + * {@code input[i]s}. * * @param data type for {@code output} output * @param input the input value @@ -5610,17 +5998,18 @@ public ShapeN shapeN(Iterable> input) { * @param data type for {@code ShapeN} output and operands * @return a new instance of ShapeN */ - public ShapeN shapeN(Iterable> input, - Class outType) { + public ShapeN shapeN( + Iterable> input, Class outType) { return ShapeN.create(scope, input, outType); } /** - * Returns the size of a tensor. - * This operation returns an integer representing the number of elements in - * {@code input}. - *

For example: - *

+   * Returns the size of a tensor. This operation returns an integer representing the number of
+   * elements in {@code input}.
+   *
+   * 

For example: + * + *

    *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
    *  size(t) ==> 12
    *  
@@ -5634,11 +6023,12 @@ public Size size(Operand input) { } /** - * Returns the size of a tensor. - * This operation returns an integer representing the number of elements in - * {@code input}. - *

For example: - *

+   * Returns the size of a tensor. This operation returns an integer representing the number of
+   * elements in {@code input}.
+   *
+   * 

For example: + * + *

    *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
    *  size(t) ==> 12
    *  
@@ -5666,27 +6056,23 @@ public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... op } /** - * Return a slice from 'input'. - * The output tensor is a tensor with dimensions described by 'size' - * whose values are extracted from 'input' starting at the offsets in - * 'begin'. - *

Requirements: - * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * Return a slice from 'input'. The output tensor is a tensor with dimensions described by 'size' + * whose values are extracted from 'input' starting at the offsets in 'begin'. + * + *

Requirements: 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * * @param data type for {@code output} output * @param input the input value - * @param begin begin[i] specifies the offset into the 'i'th dimension of - * 'input' to slice from. - * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension - * of 'input' to slice. If size[i] is -1, all remaining elements in dimension - * i are included in the slice (i.e. this is equivalent to setting - * size[i] = input.dim_size(i) - begin[i]). + * @param begin begin[i] specifies the offset into the 'i'th dimension of 'input' to slice from. + * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension of 'input' to + * slice. If size[i] is -1, all remaining elements in dimension i are included in the slice + * (i.e. this is equivalent to setting size[i] = input.dim_size(i) - begin[i]). * @param data type for {@code Slice} output and operands * @param data type for {@code Slice} output and operands * @return a new instance of Slice */ - public Slice slice(Operand input, Operand begin, - Operand sizeOutput) { + public Slice slice( + Operand input, Operand begin, Operand sizeOutput) { return Slice.create(scope, input, begin, sizeOutput); } @@ -5703,117 +6089,121 @@ public Snapshot snapshot(Operand input) { } /** - * SpaceToBatch for N-D tensors of type T. - * This operation divides "spatial" dimensions {@code [1, ..., M]} of the input into a - * grid of blocks of shape {@code block_shape}, and interleaves these blocks with the - * "batch" dimension (0) such that in the output, the spatial dimensions - * {@code [1, ..., M]} correspond to the position within the grid, and the batch - * dimension combines both the position within a spatial block and the original - * batch position. Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to {@code paddings}. See below for a - * precise description. - *

This operation is equivalent to the following steps: - *

    - *
  1. - *

    Zero-pad the start and end of dimensions {@code [1, ..., M]} of the - * input according to {@code paddings} to produce {@code padded} of shape {@code padded_shape}. - *

  2. - *
  3. - *

    Reshape {@code padded} to {@code reshaped_padded} of shape: - *

    [batch] + - * [padded_shape[1] / block_shape[0], - * block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1], - * block_shape[M-1]] + - * remaining_shape - *

  4. - *
  5. - *

    Permute dimensions of {@code reshaped_padded} to produce - * {@code permuted_reshaped_padded} of shape: - *

    block_shape + - * [batch] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape - *

  6. - *
  7. - *

    Reshape {@code permuted_reshaped_padded} to flatten {@code block_shape} into the batch - * dimension, producing an output tensor of shape: - *

    [batch * prod(block_shape)] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape - *

  8. - *
- *

Some examples: - *

(1) For the following input of shape {@code [1, 2, 2, 1]}, {@code block_shape = [2, 2]}, and - * {@code paddings = [[0, 0], [0, 0]]}: - *

+   * SpaceToBatch for N-D tensors of type T. This operation divides "spatial" dimensions
+   * {@code [1, ..., M]} of the input into a grid of blocks of shape {@code block_shape}, and
+   * interleaves these blocks with the "batch" dimension (0) such that in the output, the
+   * spatial dimensions {@code [1, ..., M]} correspond to the position within the grid, and the
+   * batch dimension combines both the position within a spatial block and the original batch
+   * position. Prior to division into blocks, the spatial dimensions of the input are optionally
+   * zero padded according to {@code paddings}. See below for a precise description.
+   *
+   * 

This operation is equivalent to the following steps: + * + *

    + *
  1. + *

    Zero-pad the start and end of dimensions {@code [1, ..., M]} of the input according to + * {@code paddings} to produce {@code padded} of shape {@code padded_shape}. + *

  2. + *

    Reshape {@code padded} to {@code reshaped_padded} of shape: + *

    [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ..., padded_shape[M] / + * block_shape[M-1], block_shape[M-1]] + remaining_shape + *

  3. + *

    Permute dimensions of {@code reshaped_padded} to produce {@code + * permuted_reshaped_padded} of shape: + *

    block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / + * block_shape[M-1]] + remaining_shape + *

  4. + *

    Reshape {@code permuted_reshaped_padded} to flatten {@code block_shape} into the batch + * dimension, producing an output tensor of shape: + *

    [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] + * / block_shape[M-1]] + remaining_shape + *

+ * + *

Some examples: + * + *

(1) For the following input of shape {@code [1, 2, 2, 1]}, {@code block_shape = [2, 2]}, and + * {@code paddings = [[0, 0], [0, 0]]}: + * + *

    *  x = [[[[1], [2]], [[3], [4]]]]
    *  
- *

The output tensor has shape {@code [4, 1, 1, 1]} and value: - *

+   *
+   * 

The output tensor has shape {@code [4, 1, 1, 1]} and value: + * + *

    *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
    *  
- *

(2) For the following input of shape {@code [1, 2, 2, 3]}, {@code block_shape = [2, 2]}, and - * {@code paddings = [[0, 0], [0, 0]]}: - *

+   *
+   * 

(2) For the following input of shape {@code [1, 2, 2, 3]}, {@code block_shape = [2, 2]}, and + * {@code paddings = [[0, 0], [0, 0]]}: + * + *

    *  x = [[[[1, 2, 3], [4, 5, 6]],
    *        [[7, 8, 9], [10, 11, 12]]]]
    *  
- *

The output tensor has shape {@code [4, 1, 1, 3]} and value: - *

+   *
+   * 

The output tensor has shape {@code [4, 1, 1, 3]} and value: + * + *

    *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
    *  
- *

(3) For the following input of shape {@code [1, 4, 4, 1]}, {@code block_shape = [2, 2]}, and - * {@code paddings = [[0, 0], [0, 0]]}: - *

+   *
+   * 

(3) For the following input of shape {@code [1, 4, 4, 1]}, {@code block_shape = [2, 2]}, and + * {@code paddings = [[0, 0], [0, 0]]}: + * + *

    *  x = [[[[1],   [2],  [3],  [4]],
    *        [[5],   [6],  [7],  [8]],
    *        [[9],  [10], [11],  [12]],
    *        [[13], [14], [15],  [16]]]]
    *  
- *

The output tensor has shape {@code [4, 2, 2, 1]} and value: - *

+   *
+   * 

The output tensor has shape {@code [4, 2, 2, 1]} and value: + * + *

    *  x = [[[[1], [3]], [[9], [11]]],
    *       [[[2], [4]], [[10], [12]]],
    *       [[[5], [7]], [[13], [15]]],
    *       [[[6], [8]], [[14], [16]]]]
    *  
- *

(4) For the following input of shape {@code [2, 2, 4, 1]}, block_shape = {@code [2, 2]}, and - * paddings = {@code [[0, 0], [2, 0]]}: - *

+   *
+   * 

(4) For the following input of shape {@code [2, 2, 4, 1]}, block_shape = {@code [2, 2]}, and + * paddings = {@code [[0, 0], [2, 0]]}: + * + *

    *  x = [[[[1],   [2],  [3],  [4]],
    *        [[5],   [6],  [7],  [8]]],
    *       [[[9],  [10], [11],  [12]],
    *        [[13], [14], [15],  [16]]]]
    *  
- *

The output tensor has shape {@code [8, 1, 3, 1]} and value: - *

+   *
+   * 

The output tensor has shape {@code [8, 1, 3, 1]} and value: + * + *

    *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
    *       [[[0], [2], [4]]], [[[0], [10], [12]]],
    *       [[[0], [5], [7]]], [[[0], [13], [15]]],
    *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
    *  
- *

Among others, this operation is useful for reducing atrous convolution into - * regular convolution. + * + *

Among others, this operation is useful for reducing atrous convolution into regular + * convolution. * * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, - * where spatial_shape has {@code M} dimensions. + * where spatial_shape has {@code M} dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. - * @param paddings 2-D with shape {@code [M, 2]}, all values must be >= 0. - * {@code paddings[i] = [pad_start, pad_end]} specifies the padding for input dimension - * {@code i + 1}, which corresponds to spatial dimension {@code i}. It is required that - * {@code block_shape[i]} divides {@code input_shape[i + 1] + pad_start + pad_end}. + * @param paddings 2-D with shape {@code [M, 2]}, all values must be >= 0. {@code paddings[i] = + * [pad_start, pad_end]} specifies the padding for input dimension {@code i + 1}, which + * corresponds to spatial dimension {@code i}. It is required that {@code block_shape[i]} + * divides {@code input_shape[i + 1] + pad_start + pad_end}. * @param data type for {@code SpaceToBatchND} output and operands * @return a new instance of SpaceToBatchNd */ - public SpaceToBatchNd spaceToBatchNd(Operand input, - Operand blockShape, Operand paddings) { + public SpaceToBatchNd spaceToBatchNd( + Operand input, + Operand blockShape, + Operand paddings) { return SpaceToBatchNd.create(scope, input, blockShape, paddings); } @@ -5821,11 +6211,10 @@ public SpaceToBatchNd spaceToBatchNd(Operand input, * Splits a tensor into {@code num_split} tensors along one dimension. * * @param data type for {@code output} output - * @param axis 0-D. The dimension along which to split. Must be in the range - * {@code [-rank(value), rank(value))}. + * @param axis 0-D. The dimension along which to split. Must be in the range {@code [-rank(value), + * rank(value))}. * @param value The tensor to split. - * @param numSplit The number of ways to split. Must evenly divide - * {@code value.shape[split_dim]}. + * @param numSplit The number of ways to split. Must evenly divide {@code value.shape[split_dim]}. * @param data type for {@code Split} output and operands * @return a new instance of Split */ @@ -5838,33 +6227,39 @@ public Split split(Operand axis, Operand value, * * @param data type for {@code output} output * @param value The tensor to split. - * @param sizeSplits list containing the sizes of each output tensor along the split - * dimension. Must sum to the dimension of value along split_dim. - * Can contain one -1 indicating that dimension is to be inferred. - * @param axis 0-D. The dimension along which to split. Must be in the range - * {@code [-rank(value), rank(value))}. + * @param sizeSplits list containing the sizes of each output tensor along the split dimension. + * Must sum to the dimension of value along split_dim. Can contain one -1 indicating that + * dimension is to be inferred. + * @param axis 0-D. The dimension along which to split. Must be in the range {@code [-rank(value), + * rank(value))}. * @param numSplit the value of the numSplit property * @param data type for {@code SplitV} output and operands * @return a new instance of SplitV */ - public SplitV splitV(Operand value, Operand sizeSplits, - Operand axis, Long numSplit) { + public SplitV splitV( + Operand value, + Operand sizeSplits, + Operand axis, + Long numSplit) { return SplitV.create(scope, value, sizeSplits, axis, numSplit); } /** - * Removes dimensions of size 1 from the shape of a tensor. 
- * Given a tensor {@code input}, this operation returns a tensor of the same type with - * all dimensions of size 1 removed. If you don't want to remove all size 1 - * dimensions, you can remove specific size 1 dimensions by specifying - * {@code axis}. - *

For example: - *

+   * Removes dimensions of size 1 from the shape of a tensor. Given a tensor {@code input}, this
+   * operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't
+   * want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying
+   * {@code axis}.
+   *
+   * 

For example: + * + *

    *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
    *  shape(squeeze(t)) ==> [2, 3]
    *  
- *

Or, to remove specific size 1 dimensions: - *

+   *
+   * 

Or, to remove specific size 1 dimensions: + * + *

    *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
    *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
    *  
@@ -5880,22 +6275,26 @@ public <T extends TType> Squeeze<T> squeeze(Operand<T> input, Squeeze.Options...
   }

   /**
-   * Packs a list of {@code N} rank-{@code R} tensors into one rank-{@code (R+1)} tensor.
-   * Packs the {@code N} tensors in {@code values} into a tensor with rank one higher than each
-   * tensor in {@code values}, by packing them along the {@code axis} dimension.
-   * Given a list of tensors of shape {@code (A, B, C)};
-   * <p>if {@code axis == 0} then the {@code output} tensor will have the shape {@code (N, A, B, C)}.
-   * if {@code axis == 1} then the {@code output} tensor will have the shape {@code (A, N, B, C)}.
-   * Etc.
-   * <p>For example:
-   * <pre>
+   * Packs a list of {@code N} rank-{@code R} tensors into one rank-{@code (R+1)} tensor. Packs the
+   * {@code N} tensors in {@code values} into a tensor with rank one higher than each tensor in
+   * {@code values}, by packing them along the {@code axis} dimension. Given a list of tensors of
+   * shape {@code (A, B, C)};
+   *
+   * <p>if {@code axis == 0} then the {@code output} tensor will have the shape {@code (N, A, B,
+   * C)}. if {@code axis == 1} then the {@code output} tensor will have the shape {@code (A, N, B,
+   * C)}. Etc.
+   *
+   * <p>For example:
+   *
+   * <pre>
     *  # 'x' is [1, 4]
     *  # 'y' is [2, 5]
     *  # 'z' is [3, 6]
     *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
     *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
     *  </pre>
-   * <p>This is the opposite of {@code unpack}.
+   *
+   * <p>This is the opposite of {@code unpack}.
    *
    * @param <T> data type for {@code output} output
    * @param values Must be of same shape and type.
    * @param options carries optional attribute values
    * @param <T> data type for {@code Pack} output and operands
    * @return a new instance of Stack
    */
@@ -5908,12 +6307,11 @@ public <T extends TType> Stack<T> stack(Iterable<Operand<T>> values, Stack.Optio
   }

   /**
-   * Stage values similar to a lightweight Enqueue.
-   * The basic functionality of this Op is similar to a queue with many
-   * fewer capabilities and options. This Op is optimized for performance.
+   * Stage values similar to a lightweight Enqueue. The basic functionality of this Op is similar to
+   * a queue with many fewer capabilities and options. This Op is optimized for performance.
    *
-   * @param values a list of tensors
-   *  dtypes A list of data types that inserted values should adhere to.
+   * @param values a list of tensors; dtypes is a list of data types that inserted values should
+   *     adhere to.
    * @param options carries optional attribute values
    * @return a new instance of Stage
    */
@@ -5933,18 +6331,16 @@ public StageClear stageClear(List<Class<? extends TType>> dtypes, StageClear.Opt
   }

   /**
-   * Op peeks at the values at the specified index. If the
-   * underlying container does not contain sufficient elements
-   * this op will block until it does. This Op is optimized for
-   * performance.
+   * Op peeks at the values at the specified index. If the underlying container does not contain
+   * sufficient elements this op will block until it does. This Op is optimized for performance.
    *
    * @param index the index value
    * @param dtypes the value of the dtypes property
    * @param options carries optional attribute values
    * @return a new instance of StagePeek
    */
-  public StagePeek stagePeek(Operand<TInt32> index, List<Class<? extends TType>> dtypes,
-      StagePeek.Options... options) {
+  public StagePeek stagePeek(
+      Operand<TInt32> index, List<Class<? extends TType>> dtypes, StagePeek.Options... options) {
    return StagePeek.create(scope, index, dtypes, options);
  }

@@ -5961,7 +6357,8 @@ public StageSize stageSize(List<Class<? extends TType>> dtypes, StageSize.Option
   /**
    * An n-way switch statement which calls a single branch function.
-   *
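// Editor's example (not part of the diff): a hedged sketch of stack (Pack), reproducing
// the javadoc's x/y/z example; names are illustrative only.
import java.util.Arrays;
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Stack;
import org.tensorflow.types.TInt32;

class StackExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TInt32> x = tf.constant(new int[] {1, 4});
      Operand<TInt32> y = tf.constant(new int[] {2, 5});
      Operand<TInt32> z = tf.constant(new int[] {3, 6});
      System.out.println(tf.stack(Arrays.asList(x, y, z)).shape());                 // [3, 2]
      System.out.println(tf.stack(Arrays.asList(x, y, z), Stack.axis(1L)).shape()); // [2, 3]
    }
  }
}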

+   *
+   * 
    *  An n-way switch statement, implementing the following:
    *  ```
    *  switch (branch_index) {
@@ -5983,22 +6380,29 @@ public StageSize stageSize(List> dtypes, StageSize.Option
    * @param branchIndex The branch selector, an int32 Tensor.
    * @param input A list of input tensors passed to the branch function.
    * @param Tout A list of output types.
-   * @param branches 
+   * @param branches
+   *     
    *    A list of functions each of which takes 'inputs' and returns a list of
    *    tensors, whose types are the same as what every other branch returns.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatefulCase */ - public StatefulCase statefulCase(Operand branchIndex, Iterable> input, - List> Tout, List branches, Case.Options... options) { + public StatefulCase statefulCase( + Operand branchIndex, + Iterable> input, + List> Tout, + List branches, + Case.Options... options) { return StatefulCase.create(scope, branchIndex, input, Tout, branches, options); } /** * output = cond ? then_branch(input) : else_branch(input) * - * @param cond
+   * @param cond
+   *     
    *    A Tensor. If the tensor is a scalar of non-boolean type, the
    *    scalar is converted to a boolean according to the
    *    following rule: if the scalar is a numerical value, non-zero means
@@ -6006,21 +6410,30 @@ public StatefulCase statefulCase(Operand branchIndex, Iterable
+   *
    * @param input A list of input tensors.
    * @param Tout A list of output types.
-   * @param thenBranch 
+   * @param thenBranch
+   *     
    *    A function that takes 'inputs' and returns a list of tensors, whose
    *    types are the same as what else_branch returns.
    *  
- * @param elseBranch
+   *
+   * @param elseBranch
+   *     
    *  A function that takes 'inputs' and returns a list of tensors, whose
    *  types are the same as what then_branch returns.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatefulIf */ - public StatefulIf statefulIf(Operand cond, Iterable> input, - List> Tout, ConcreteFunction thenBranch, ConcreteFunction elseBranch, + public StatefulIf statefulIf( + Operand cond, + Iterable> input, + List> Tout, + ConcreteFunction thenBranch, + ConcreteFunction elseBranch, If.Options... options) { return StatefulIf.create(scope, cond, input, Tout, thenBranch, elseBranch, options); } @@ -6030,18 +6443,23 @@ public StatefulIf statefulIf(Operand cond, Iterable> * * @param args A list of input tensors. * @param Tout A list of output types. - * @param f
+   * @param f
+   *     
    *    A function that takes 'args', a list of tensors, and returns 'output',
    *    another list of tensors. Input and output types are specified by 'Tin'
    *    and 'Tout'. The function body of f will be placed and partitioned across
    *    devices, setting this op apart from the regular Call op. This op is
    *    stateful.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatefulPartitionedCall */ - public StatefulPartitionedCall statefulPartitionedCall(Iterable> args, - List> Tout, ConcreteFunction f, PartitionedCall.Options... options) { + public StatefulPartitionedCall statefulPartitionedCall( + Iterable> args, + List> Tout, + ConcreteFunction f, + PartitionedCall.Options... options) { return StatefulPartitionedCall.create(scope, args, Tout, f, options); } @@ -6049,7 +6467,8 @@ public StatefulPartitionedCall statefulPartitionedCall(Iterable> args * output = input; While (Cond(output)) { output = Body(output) } * * @param input A list of input tensors whose types are T. - * @param cond
+   * @param cond
+   *     
    *    A function takes 'input' and returns a tensor.  If the tensor is
    *    a scalar of non-boolean, the scalar is converted to a boolean
    *    according to the following rule: if the scalar is a numerical
@@ -6058,23 +6477,30 @@ public StatefulPartitionedCall statefulPartitionedCall(Iterable> args
    *    tensor is not a scalar, non-emptiness means True and False
    *    otherwise.
    *  
- * @param body
+   *
+   * @param body
+   *     
    *    A function that takes a list of tensors and returns another
    *    list of tensors. Both lists have the same types as specified
    *    by T.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatefulWhile */ - public StatefulWhile statefulWhile(Iterable> input, ConcreteFunction cond, - ConcreteFunction body, While.Options... options) { + public StatefulWhile statefulWhile( + Iterable> input, + ConcreteFunction cond, + ConcreteFunction body, + While.Options... options) { return StatefulWhile.create(scope, input, cond, body, options); } /** * output = cond ? then_branch(input) : else_branch(input) * - * @param cond
+   * @param cond
+   *     
    *    A Tensor. If the tensor is a scalar of non-boolean type, the
    *    scalar is converted to a boolean according to the
    *    following rule: if the scalar is a numerical value, non-zero means
@@ -6085,44 +6511,58 @@ public StatefulWhile statefulWhile(Iterable> input, ConcreteFunction
    *    This should only be used when the if then/else body functions do not
    *    have stateful ops.
    *  
+ * * @param input A list of input tensors. * @param Tout A list of output types. - * @param thenBranch
+   * @param thenBranch
+   *     
    *    A function that takes 'inputs' and returns a list of tensors, whose
    *    types are the same as what else_branch returns.
    *  
- * @param elseBranch
+   *
+   * @param elseBranch
+   *     
    *  A function that takes 'inputs' and returns a list of tensors, whose
    *  types are the same as what then_branch returns.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatelessIf */ - public StatelessIf statelessIf(Operand cond, Iterable> input, - List> Tout, ConcreteFunction thenBranch, ConcreteFunction elseBranch, + public StatelessIf statelessIf( + Operand cond, + Iterable> input, + List> Tout, + ConcreteFunction thenBranch, + ConcreteFunction elseBranch, If.Options... options) { return StatelessIf.create(scope, cond, input, Tout, thenBranch, elseBranch, options); } /** - * returns {@code f(inputs)}, where {@code f}'s body is placed and partitioned. - * Asynchronously executes a function, potentially across multiple devices but - * within a single process. The kernel places and partitions a given function's - * underlying graph, and executes each of the partitioned subgraphs as a function. + * returns {@code f(inputs)}, where {@code f}'s body is placed and partitioned. Asynchronously + * executes a function, potentially across multiple devices but within a single process. The + * kernel places and partitions a given function's underlying graph, and executes each of the + * partitioned subgraphs as a function. * * @param args A list of input tensors. * @param Tout A list of output types. - * @param f
+   * @param f
+   *     
    *    A function that takes 'args', a list of tensors, and returns 'output',
    *    another list of tensors. Input and output types are specified by 'Tin'
    *    and 'Tout'. The function body of f will be placed and partitioned across
    *    devices, setting this op apart from the regular Call op.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatelessPartitionedCall */ - public StatelessPartitionedCall statelessPartitionedCall(Iterable> args, - List> Tout, ConcreteFunction f, PartitionedCall.Options... options) { + public StatelessPartitionedCall statelessPartitionedCall( + Iterable> args, + List> Tout, + ConcreteFunction f, + PartitionedCall.Options... options) { return StatelessPartitionedCall.create(scope, args, Tout, f, options); } @@ -6130,7 +6570,8 @@ public StatelessPartitionedCall statelessPartitionedCall(Iterable> ar * output = input; While (Cond(output)) { output = Body(output) } * * @param input A list of input tensors whose types are T. - * @param cond
+   * @param cond
+   *     
    *    A function takes 'input' and returns a tensor.  If the tensor is
    *    a scalar of non-boolean, the scalar is converted to a boolean
    *    according to the following rule: if the scalar is a numerical
@@ -6142,42 +6583,49 @@ public StatelessPartitionedCall statelessPartitionedCall(Iterable> ar
    *    This should only be used when the while condition and body functions
    *    do not have stateful ops.
    *  
- * @param body
+   *
+   * @param body
+   *     
    *    A function that takes a list of tensors and returns another
    *    list of tensors. Both lists have the same types as specified
    *    by T.
    *  
+ * * @param options carries optional attribute values * @return a new instance of StatelessWhile */ - public StatelessWhile statelessWhile(Iterable> input, ConcreteFunction cond, - ConcreteFunction body, While.Options... options) { + public StatelessWhile statelessWhile( + Iterable> input, + ConcreteFunction cond, + ConcreteFunction body, + While.Options... options) { return StatelessWhile.create(scope, input, cond, body, options); } /** - * Stops gradient computation. - * When executed in a graph, this op outputs its input tensor as-is. - *

When building ops to compute gradients, this op prevents the contribution of
-   * its inputs to be taken into account. Normally, the gradient generator adds ops
-   * to a graph to compute the derivatives of a specified 'loss' by recursively
-   * finding out inputs that contributed to its computation. If you insert this op
-   * in the graph it inputs are masked from the gradient generator. They are not
-   * taken into account for computing gradients.
-   * <p>This is useful any time you want to compute a value with TensorFlow but need
-   * to pretend that the value was a constant. For example, the softmax function
-   * for a vector x can be written as
-   * <pre>
+   * Stops gradient computation. When executed in a graph, this op outputs its input tensor as-is.
+   *
+   * <p>When building ops to compute gradients, this op prevents the contribution of its inputs to
+   * be taken into account. Normally, the gradient generator adds ops to a graph to compute the
+   * derivatives of a specified 'loss' by recursively finding out inputs that contributed to its
+   * computation. If you insert this op in the graph, its inputs are masked from the gradient
+   * generator. They are not taken into account for computing gradients.
+   *
+   * <p>This is useful any time you want to compute a value with TensorFlow but need to pretend that
+   * the value was a constant. For example, the softmax function for a vector x can be written as
+   *
+   * <pre>
    *
    *    def softmax(x):
    *      numerator = tf.exp(x)
    *      denominator = tf.reduce_sum(numerator)
    *      return numerator / denominator
    *  
- *

This however is susceptible to overflow if the values in x are large. An - * alternative more stable way is to subtract the maximum of x from each of the - * values. - *

+   *
+   * 

This however is susceptible to overflow if the values in x are large. An alternative more + * stable way is to subtract the maximum of x from each of the values. + * + *

    *
    *    def stable_softmax(x):
    *      z = x - tf.reduce_max(x)
@@ -6185,11 +6633,12 @@ public StatelessWhile statelessWhile(Iterable> input, ConcreteFunctio
    *      denominator = tf.reduce_sum(numerator)
    *      return numerator / denominator
    *  
-   * <p>However, when we backprop through the softmax to x, we dont want to backprop
-   * through the {@code tf.reduce_max(x)} (if the max values are not unique then the
-   * gradient could flow to the wrong input) calculation and treat that as a
-   * constant. Therefore, we should write this out as
-   * <pre>
+   *
+   * <p>However, when we backprop through the softmax to x, we don't want to backprop through the
+   * {@code tf.reduce_max(x)} (if the max values are not unique then the gradient could flow to the
+   * wrong input) calculation and treat that as a constant. Therefore, we should write this out as
+   *
+   * <pre>
    *
    *    def stable_softmax(x):
    *      z = x - tf.stop_gradient(tf.reduce_max(x))
@@ -6197,16 +6646,18 @@ public StatelessWhile statelessWhile(Iterable> input, ConcreteFunctio
    *      denominator = tf.reduce_sum(numerator)
    *      return numerator / denominator
    *  
- *

Some other examples include: - *

    - *
  • The EM algorithm where the M-step should not involve backpropagation - * through the output of the E-step.
  • - *
  • Contrastive divergence training of Boltzmann machines where, when - * differentiating the energy function, the training must not backpropagate - * through the graph that generated the samples from the model.
  • - *
  • Adversarial training, where no backprop should happen through the adversarial - * example generation process.
  • - *
+ * + *

Some other examples include: + * + *

    + *
  • The EM algorithm where the M-step should not involve backpropagation + * through the output of the E-step. + *
  • Contrastive divergence training of Boltzmann machines where, when differentiating the + * energy function, the training must not backpropagate through the graph that generated the + * samples from the model. + *
  • Adversarial training, where no backprop should happen through the adversarial example + * generation process. + *
* * @param data type for {@code output} output * @param input the input value @@ -6219,46 +6670,51 @@ public StopGradient stopGradient(Operand input) { /** * Return a strided slice from `input`. - *
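// Editor's example (not part of the diff): the javadoc's stable-softmax trick transcribed
// to the Java API as a hedged sketch; method and class names are illustrative only.
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

class StopGradientExample {
  static Operand<TFloat32> stableSoftmax(Ops tf, Operand<TFloat32> x) {
    // stopGradient makes the max behave like a constant during backprop.
    Operand<TFloat32> z = tf.math.sub(x, tf.stopGradient(tf.reduceMax(x, tf.constant(0))));
    Operand<TFloat32> numerator = tf.math.exp(z);
    return tf.math.div(numerator, tf.reduceSum(numerator, tf.constant(0)));
  }

  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      System.out.println(stableSoftmax(tf, tf.constant(new float[] {1f, 2f, 3f})).shape()); // [3]
    }
  }
}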

- * The goal of this op is to produce a new tensor with a subset of the elements from the `n` dimensional `input` - * tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded into the arguments of this - * function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each range specification - * entry can be one of the following: - *

- * - An ellipsis (...) using {@link Indices#ellipsis()}. Ellipses are used to imply zero or more dimensions of - * full-dimension selection. For example, {@code stridedSlice(foo, Indices.ellipsis()} is the identity slice. - *

- * - A new axis using {@link Indices#newAxis()}. This is used to insert a new shape=1 dimension. - * For example, `{@code stridedSlice(foo, Indices.newAxis())} where {@code foo} is shape {@code (3, 4)} - * produces a {@code (1, 3, 4)} tensor. - *

- * - A range {@code begin:end:stride} using {@link Indices#slice(Long, Long, long)} Index.slice()} or {@link Indices#all()}. This is used to specify - * how much to choose from a given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer which - * represents the index of the first value to select while {@code end} represents the index of the last value to select - * (exclusive). Begin and end can be null, in which case the index begins or ends at the beginning or end of the dimension, - * respectively (reversed if stride is negative). When both are null, {@code slice()} is the same as {@code all()}. - * The number of values selected in each dimension is {@code end - begin} if {@code stride > 0} and {@code begin - end} - * if {@code stride < 0}. {@code begin} and {@code end} can be negative where {@code -1} is the last element, {@code -2} - * is the second to last. For example, given a shape {@code (3,)} tensor {@code stridedSlice(foo, Indices.all())}, the - * effective {@code begin} and {@code end} are {@code 0} and {@code 3}. Do not assume this is equivalent to - * {@code stridedSlice(foo, Indices.slice(0, -1))} which has an effective {@code begin} and {@code end} of {@code 0} and - * {@code 2}. Another example is {@code stridedSlice(foo, Indices.slice(-2, null, -1))} which reverses the first dimension - * of a tensor while dropping the last two (in the original order elements). For example {@code foo = [1,2,3,4]; - * stridedSlice(foo, Indices.slice(-2, null, -1)} is {@code [4,3]}. - *

- * - A single index using {@link Indices#at(long)}. This is used to keep only elements that have a given index. For - * example ({@code stridedSlice(foo, Indices.at(2))} on a shape {@code (5,6)} tensor produces a shape {@code (6,)} tensor. - * The dimension can be kept with size one using {@link Indices#at(long, boolean)}. - *

- * These semantics generally follow NumPy's indexing semantics, which can be found here: - * https://numpy.org/doc/stable/reference/arrays.indexing.html - *

- * - * Requirements: - * `0 != strides[i] for i in [0, m)` Only one ellipsis. + * + *

The goal of this op is to produce a new tensor with a subset of the elements from the `n` + * dimensional `input` tensor. The subset is chosen using a sequence of `m` sparse range + * specifications encoded into the arguments of this function. Note, in some cases `m` could be + * equal to `n`, but this need not be the case. Each range specification entry can be one of the + * following: + * + *

- An ellipsis (...) using {@link Indices#ellipsis()}. Ellipses are used to imply zero or + * more dimensions of full-dimension selection. For example, {@code stridedSlice(foo, + * Indices.ellipsis()} is the identity slice. + * + *

- A new axis using {@link Indices#newAxis()}. This is used to insert a new shape=1 + * dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} where {@code foo} is + * shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. + * + *

- A range {@code begin:end:stride} using {@link Indices#slice(Long, Long, long)} + * Index.slice()} or {@link Indices#all()}. This is used to specify how much to choose from a + * given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer which + * represents the index of the first value to select while {@code end} represents the index of the + * last value to select (exclusive). Begin and end can be null, in which case the index begins or + * ends at the beginning or end of the dimension, respectively (reversed if stride is negative). + * When both are null, {@code slice()} is the same as {@code all()}. The number of values selected + * in each dimension is {@code end - begin} if {@code stride > 0} and {@code begin - end} if + * {@code stride < 0}. {@code begin} and {@code end} can be negative where {@code -1} is the last + * element, {@code -2} is the second to last. For example, given a shape {@code (3,)} tensor + * {@code stridedSlice(foo, Indices.all())}, the effective {@code begin} and {@code end} are + * {@code 0} and {@code 3}. Do not assume this is equivalent to {@code stridedSlice(foo, + * Indices.slice(0, -1))} which has an effective {@code begin} and {@code end} of {@code 0} and + * {@code 2}. Another example is {@code stridedSlice(foo, Indices.slice(-2, null, -1))} which + * reverses the first dimension of a tensor while dropping the last two (in the original order + * elements). For example {@code foo = [1,2,3,4]; stridedSlice(foo, Indices.slice(-2, null, -1)} + * is {@code [4,3]}. + * + *

- A single index using {@link Indices#at(long)}. This is used to keep only elements that + * have a given index. For example ({@code stridedSlice(foo, Indices.at(2))} on a shape {@code + * (5,6)} tensor produces a shape {@code (6,)} tensor. The dimension can be kept with size one + * using {@link Indices#at(long, boolean)}. + * + *

These semantics generally follow NumPy's indexing semantics, which can be found here: https://numpy.org/doc/stable/reference/arrays.indexing.html + * + *

Requirements: `0 != strides[i] for i in [0, m)` Only one ellipsis.
    *
    * @param <T> data type for {@code output()} output
-   * @param indices The indices to slice. See {@link Indices}. 
+   * @param indices The indices to slice. See {@link Indices}.
    * @return a new instance of StridedSlice
    * @see Indices
    */
@@ -6267,55 +6723,52 @@ public <T extends TType> StridedSlice<T> stridedSlice(Operand<T> input, Index...
   }

   /**
-   * Return a strided slice from {@code input}.
-   * Note, most python users will want to use the Python {@code Tensor.__getitem__}
-   * or {@code Variable.__getitem__} rather than this op directly.
-   *
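// Editor's example (not part of the diff): a hedged sketch of the Indices-based
// stridedSlice overload described above; only static shapes are shown here.
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.ndarray.index.Indices;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

class StridedSliceIndicesExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TInt32> m = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
      // Keep only row 1: shape (3,); at(1, true) keeps the dimension as size 1.
      System.out.println(tf.stridedSlice(m, Indices.at(1)).shape());       // [3]
      System.out.println(tf.stridedSlice(m, Indices.at(1, true)).shape()); // [1, 3]
      // begin:end:stride on the first dimension, full range on the second.
      System.out.println(tf.stridedSlice(m, Indices.slice(0L, 1L, 1), Indices.all()).shape()); // [1, 3]
    }
  }
}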

The goal of this op is to produce a new tensor with a subset of - * the elements from the {@code n} dimensional {@code input} tensor. The subset is chosen using - * a sequence of {@code m} sparse range specifications encoded into the arguments - * of this function. Note, in some cases - * {@code m} could be equal to {@code n}, but this need not be the case. Each - * range specification entry can be one of the following: - *

    - *
  • - *

    An ellipsis (...). Ellipses are used to imply zero or more - * dimensions of full-dimension selection and are produced using - * {@code ellipsis_mask}. For example, {@code foo[...]} is the identity slice. - *

  • - *
  • - *

    A new axis. This is used to insert a new shape=1 dimension and is - * produced using {@code new_axis_mask}. For example, {@code foo[:, ...]} where - * {@code foo} is shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. - *

  • - *
  • - *

    A range {@code begin:end:stride}. This is used to specify how much to choose from - * a given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer - * which represents the index of the first value to select while {@code end} represents - * the index of the last value to select. The number of values selected in each - * dimension is {@code end - begin} if {@code stride > 0} and {@code begin - end} if {@code stride < 0}. - * {@code begin} and {@code end} can be negative where {@code -1} is the last element, {@code -2} is - * the second to last. {@code begin_mask} controls whether to replace the explicitly - * given {@code begin} with an implicit effective value of {@code 0} if {@code stride > 0} and - * {@code -1} if {@code stride < 0}. {@code end_mask} is analogous but produces the number - * required to create the largest open interval. For example, given a shape - * {@code (3,)} tensor {@code foo[:]}, the effective {@code begin} and {@code end} are {@code 0} and {@code 3}. Do - * not assume this is equivalent to {@code foo[0:-1]} which has an effective {@code begin} - * and {@code end} of {@code 0} and {@code 2}. Another example is {@code foo[-2::-1]} which reverses the - * first dimension of a tensor while dropping the last two (in the original - * order elements). For example {@code foo = [1,2,3,4]; foo[-2::-1]} is {@code [4,3]}. - *

  • - *
  • - *

    A single index. This is used to keep only elements that have a given - * index. For example ({@code foo[2, :]} on a shape {@code (5,6)} tensor produces a - * shape {@code (6,)} tensor. This is encoded in {@code begin} and {@code end} and - * {@code shrink_axis_mask}. - *

  • - *
- *

Each conceptual range specification is encoded in the op's argument. This - * encoding is best understand by considering a non-trivial example. In - * particular, - * {@code foo[1, 2:4, None, ..., :-3:-1, :]} will be encoded as - *

+   * Return a strided slice from {@code input}. Note, most python users will want to use the Python
+   * {@code Tensor.__getitem__} or {@code Variable.__getitem__} rather than this op directly.
+   *
+   * 

The goal of this op is to produce a new tensor with a subset of the elements from the {@code + * n} dimensional {@code input} tensor. The subset is chosen using a sequence of {@code m} sparse + * range specifications encoded into the arguments of this function. Note, in some cases {@code m} + * could be equal to {@code n}, but this need not be the case. Each range specification entry can + * be one of the following: + * + *

    + *
  • + *

    An ellipsis (...). Ellipses are used to imply zero or more dimensions of + * full-dimension selection and are produced using {@code ellipsis_mask}. For example, + * {@code foo[...]} is the identity slice. + *

  • + *

    A new axis. This is used to insert a new shape=1 dimension and is produced using + * {@code new_axis_mask}. For example, {@code foo[:, ...]} where {@code foo} is shape {@code + * (3, 4)} produces a {@code (1, 3, 4)} tensor. + *

  • + *

    A range {@code begin:end:stride}. This is used to specify how much to choose from a + * given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer + * which represents the index of the first value to select while {@code end} represents the + * index of the last value to select. The number of values selected in each dimension is + * {@code end - begin} if {@code stride > 0} and {@code begin - end} if {@code stride < 0}. + * {@code begin} and {@code end} can be negative where {@code -1} is the last element, + * {@code -2} is the second to last. {@code begin_mask} controls whether to replace the + * explicitly given {@code begin} with an implicit effective value of {@code 0} if {@code + * stride > 0} and {@code -1} if {@code stride < 0}. {@code end_mask} is analogous but + * produces the number required to create the largest open interval. For example, given a + * shape {@code (3,)} tensor {@code foo[:]}, the effective {@code begin} and {@code end} are + * {@code 0} and {@code 3}. Do not assume this is equivalent to {@code foo[0:-1]} which has + * an effective {@code begin} and {@code end} of {@code 0} and {@code 2}. Another example is + * {@code foo[-2::-1]} which reverses the first dimension of a tensor while dropping the + * last two (in the original order elements). For example {@code foo = [1,2,3,4]; + * foo[-2::-1]} is {@code [4,3]}. + *

  • + *

    A single index. This is used to keep only elements that have a given index. For + * example ({@code foo[2, :]} on a shape {@code (5,6)} tensor produces a shape {@code (6,)} + * tensor. This is encoded in {@code begin} and {@code end} and {@code shrink_axis_mask}. + *

+ * + *

Each conceptual range specification is encoded in the op's argument. This encoding is best
+   * understood by considering a non-trivial example. In particular, {@code foo[1, 2:4, None, ...,
+   * :-3:-1, :]} will be encoded as
+   *
+   * <pre>
    *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
    *  end = [2, 4, x, x, -3, x]
    *  strides = [1, 1, x, x, -1, 1]
@@ -6325,99 +6778,99 @@ public  StridedSlice stridedSlice(Operand input, Index...
    *  new_axis_mask = 1<<2 = 4
    *  shrink_axis_mask = 1<<0 = 1
    *  
- *

In this case if {@code foo.shape} is (5, 5, 5, 5, 5, 5) the final shape of - * the slice becomes (2, 1, 5, 5, 2, 5). - * Let us walk step by step through each argument specification. - *

    - *
  1. - *

    The first argument in the example slice is turned into {@code begin = 1} and - * {@code end = begin + 1 = 2}. To disambiguate from the original spec {@code 2:4} we - * also set the appropriate bit in {@code shrink_axis_mask}. - *

  2. - *
  3. - *

    {@code 2:4} is contributes 2, 4, 1 to begin, end, and stride. All masks have - * zero bits contributed. - *

  4. - *
  5. - *

    None is a synonym for {@code tf.newaxis}. This means insert a dimension of size 1 - * dimension in the final shape. Dummy values are contributed to begin, - * end and stride, while the new_axis_mask bit is set. - *

  6. - *
  7. - *

    {@code ...} grab the full ranges from as many dimensions as needed to - * fully specify a slice for every dimension of the input shape. - *

  8. - *
  9. - *

    {@code :-3:-1} shows the use of negative indices. A negative index {@code i} associated - * with a dimension that has shape {@code s} is converted to a positive index - * {@code s + i}. So {@code -1} becomes {@code s-1} (i.e. the last element). This conversion - * is done internally so begin, end and strides receive x, -3, and -1. - * The appropriate begin_mask bit is set to indicate the start range is the - * full range (ignoring the x). - *

  10. - *
  11. - *

    {@code :} indicates that the entire contents of the corresponding dimension - * is selected. This is equivalent to {@code ::} or {@code 0::1}. begin, end, and strides - * receive 0, 0, and 1, respectively. The appropriate bits in {@code begin_mask} and - * {@code end_mask} are also set. - *

  12. - *
- *

Requirements: - * {@code 0 != strides[i] for i in [0, m)} - * {@code ellipsis_mask must be a power of two (only one ellipsis)} + * + *

In this case if {@code foo.shape} is (5, 5, 5, 5, 5, 5) the final shape of the slice becomes
+   * (2, 1, 5, 5, 2, 5). Let us walk step by step through each argument specification.
+   *
+   * <ol>
+   *   <li>The first argument in the example slice is turned into {@code begin = 1} and {@code
+   *       end = begin + 1 = 2}. To disambiguate from the original spec {@code 2:4} we also set the
+   *       appropriate bit in {@code shrink_axis_mask}.
+   *   <li>{@code 2:4} contributes 2, 4, 1 to begin, end, and stride. All masks have zero bits
+   *       contributed.
+   *   <li>None is a synonym for {@code tf.newaxis}. This means insert a size 1 dimension in the
+   *       final shape. Dummy values are contributed to begin, end and stride, while the
+   *       new_axis_mask bit is set.
+   *   <li>{@code ...} grabs the full ranges from as many dimensions as needed to fully specify a
+   *       slice for every dimension of the input shape.
+   *   <li>{@code :-3:-1} shows the use of negative indices. A negative index {@code i}
+   *       associated with a dimension that has shape {@code s} is converted to a positive index
+   *       {@code s + i}. So {@code -1} becomes {@code s-1} (i.e. the last element). This conversion
+   *       is done internally so begin, end and strides receive x, -3, and -1. The appropriate
+   *       begin_mask bit is set to indicate the start range is the full range (ignoring the x).
+   *   <li>{@code :} indicates that the entire contents of the corresponding dimension is
+   *       selected. This is equivalent to {@code ::} or {@code 0::1}. begin, end, and strides
+   *       receive 0, 0, and 1, respectively. The appropriate bits in {@code begin_mask} and {@code
+   *       end_mask} are also set.
+   * </ol>
+   *
+   * <p>Requirements: {@code 0 != strides[i] for i in [0, m)} {@code ellipsis_mask must be
+   * a power of two (only one ellipsis)}
    *
    * @param <T> data type for {@code output} output
    * @param input the input value
    * @param begin {@code begin[k]} specifies the offset into the {@code k}th range specification.
-   *  The exact dimension this corresponds to will be determined by context.
-   *  Out-of-bounds values will be silently clamped. If the {@code k}th bit of
-   *  {@code begin_mask} then {@code begin[k]} is ignored and the full range of the
-   *  appropriate dimension is used instead. Negative values causes indexing
-   *  to start from the highest element e.g. If {@code foo==[1,2,3]} then {@code foo[-1]==3}.
+   *     The exact dimension this corresponds to will be determined by context. Out-of-bounds values
+   *     will be silently clamped. If the {@code k}th bit of {@code begin_mask} is set, then {@code
+   *     begin[k]} is ignored and the full range of the appropriate dimension is used instead.
+   *     Negative values cause indexing to start from the highest element, e.g. if {@code
+   *     foo==[1,2,3]} then {@code foo[-1]==3}.
    * @param end {@code end[i]} is like {@code begin} with the exception that {@code end_mask} is
-   *  used to determine full ranges.
+   *     used to determine full ranges.
    * @param strides {@code strides[i]} specifies the increment in the {@code i}th specification
-   *  after extracting a given element. Negative indices will reverse
-   *  the original order. Out or range values are
-   *  clamped to {@code [0,dim[i]) if slice[i]>0} or {@code [-1,dim[i]-1] if slice[i] < 0}
+   *     after extracting a given element. Negative indices will reverse the original order.
+   *     Out-of-range values are clamped to {@code [0,dim[i]) if slice[i]>0} or {@code [-1,dim[i]-1]
+   *     if slice[i] < 0}
    * @param options carries optional attribute values
    * @param <T> data type for {@code StridedSlice} output and operands
    * @param <U> data type for {@code StridedSlice} output and operands
    * @return a new instance of StridedSlice
    */
-  public <T extends TType, U extends TNumber> StridedSlice<T> stridedSlice(Operand<T> input,
-      Operand<U> begin, Operand<U> end, Operand<U> strides, StridedSlice.Options... options) {
+  public <T extends TType, U extends TNumber> StridedSlice<T> stridedSlice(
+      Operand<T> input,
+      Operand<U> begin,
+      Operand<U> end,
+      Operand<U> strides,
+      StridedSlice.Options... options) {
     return StridedSlice.create(scope, input, begin, end, strides, options);
   }

   /**
    * Assign `value` to the sliced l-value reference of `ref`.
-   * <p>
- * The values of `value` are assigned to the positions in the variable `ref` that are selected by the slice - * parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. - *

- * NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly the shape produced by - * the slice of `ref`. + * + *

The values of `value` are assigned to the positions in the variable `ref` that are selected + * by the slice parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as + * in `StridedSlice`. + * + *

NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly + * the shape produced by the slice of `ref`. * * @param data type for {@code outputRef()} output * @param ref the tensor to assign to. * @param value the value to assign. - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link Indices}. * @return a new instance of StridedSliceAssign * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) */ - public StridedSliceAssign stridedSliceAssign(Operand ref, - Operand value, Index... indices) { + public StridedSliceAssign stridedSliceAssign( + Operand ref, Operand value, Index... indices) { return StridedSliceHelper.stridedSliceAssign(scope, ref, value, indices); } /** - * Assign {@code value} to the sliced l-value reference of {@code ref}. - * The values of {@code value} are assigned to the positions in the variable - * {@code ref} that are selected by the slice parameters. The slice parameters - * {@code begin}, {@code end}, {@code strides}, etc. work exactly as in {@code StridedSlice}. - *
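// Editor's example (not part of the diff): a hedged sketch of the Indices-based
// stridedSliceAssign; `value` must match the slice shape exactly (no broadcasting).
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.ndarray.Shape;
import org.tensorflow.ndarray.index.Indices;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

class StridedSliceAssignExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      // A ref-typed variable of shape (4,); assign into its first two elements.
      Operand<TFloat32> ref = tf.variable(Shape.of(4), TFloat32.class);
      tf.stridedSliceAssign(ref, tf.constant(new float[] {9f, 9f}), Indices.slice(0L, 2L, 1));
    }
  }
}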

NOTE this op currently does not support broadcasting and so {@code value}'s - * shape must be exactly the shape produced by the slice of {@code ref}. + * Assign {@code value} to the sliced l-value reference of {@code ref}. The values of {@code + * value} are assigned to the positions in the variable {@code ref} that are selected by the slice + * parameters. The slice parameters {@code begin}, {@code end}, {@code strides}, etc. work exactly + * as in {@code StridedSlice}. + * + *

NOTE this op currently does not support broadcasting and so {@code value}'s shape must be + * exactly the shape produced by the slice of {@code ref}. * * @param data type for {@code output_ref} output * @param ref the ref value @@ -6431,20 +6884,24 @@ public StridedSliceAssign stridedSliceAssign(Operand ref * @return a new instance of StridedSliceAssign */ public StridedSliceAssign stridedSliceAssign( - Operand ref, Operand begin, Operand end, Operand strides, Operand value, + Operand ref, + Operand begin, + Operand end, + Operand strides, + Operand value, StridedSliceAssign.Options... options) { return StridedSliceAssign.create(scope, ref, begin, end, strides, value, options); } /** - * Returns the gradient of {@code StridedSlice}. - * Since {@code StridedSlice} cuts out pieces of its {@code input} which is size - * {@code shape}, its gradient will have the same shape (which is passed here - * as {@code shape}). The gradient will be zero in any element that the slice - * does not select. - *

Arguments are the same as StridedSliceGrad with the exception that - * {@code dy} is the input gradient to be propagated and {@code shape} is the - * shape of {@code StridedSlice}'s {@code input}. + * Returns the gradient of {@code StridedSlice}. Since {@code StridedSlice} cuts out pieces of its + * {@code input} which is size {@code shape}, its gradient will have the same shape (which is + * passed here as {@code shape}). The gradient will be zero in any element that the slice does not + * select. + * + *

Arguments are the same as StridedSliceGrad with the exception that {@code dy} is the input + * gradient to be propagated and {@code shape} is the shape of {@code StridedSlice}'s {@code + * input}. * * @param data type for {@code output} output * @param shape the shape value @@ -6457,37 +6914,40 @@ public StridedSliceAssign stridedSliceAs * @param data type for {@code StridedSliceGrad} output and operands * @return a new instance of StridedSliceGrad */ - public StridedSliceGrad stridedSliceGrad(Operand shape, - Operand begin, Operand end, Operand strides, Operand dy, + public StridedSliceGrad stridedSliceGrad( + Operand shape, + Operand begin, + Operand end, + Operand strides, + Operand dy, StridedSliceGrad.Options... options) { return StridedSliceGrad.create(scope, shape, begin, end, strides, dy, options); } /** - * Computes the sum of elements across dimensions of a tensor. - * Reduces {@code input} along the dimensions given in {@code axis}. Unless - * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in - * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are - * retained with length 1. + * Computes the sum of elements across dimensions of a tensor. Reduces {@code input} along the + * dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank of the tensor is + * reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the reduced + * dimensions are retained with length 1. * * @param data type for {@code output} output * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * {@code [-rank(input), rank(input))}. + * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}. * @param options carries optional attribute values * @param data type for {@code Sum} output and operands * @return a new instance of Sum */ - public Sum sum(Operand input, Operand axis, - Sum.Options... options) { + public Sum sum( + Operand input, Operand axis, Sum.Options... options) { return Sum.create(scope, input, axis, options); } /** - * Forwards {@code data} to the output port determined by {@code pred}. - * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, - * the data goes to {@code output_false}. - *
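// Editor's example (not part of the diff): a minimal sketch of the sum reduction
// documented above, with and without keepDims.
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Sum;
import org.tensorflow.types.TInt32;

class SumExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      Operand<TInt32> x = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
      System.out.println(tf.sum(x, tf.constant(1)).shape());                     // [2]
      System.out.println(tf.sum(x, tf.constant(1), Sum.keepDims(true)).shape()); // [2, 1]
    }
  }
}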

See also {@code RefSwitch} and {@code Merge}. + * Forwards {@code data} to the output port determined by {@code pred}. If {@code pred} is true, + * the {@code data} input is forwarded to {@code output_true}. Otherwise, the data goes to {@code + * output_false}. + * + *

See also {@code RefSwitch} and {@code Merge}. * * @param data type for {@code output_false} output * @param data The tensor to be forwarded to the appropriate output. @@ -6500,18 +6960,18 @@ public SwitchCond switchCond(Operand data, OperandIt is the caller's responsibility to ensure that 'ref' is eventually passed to a - * matching 'DestroyTemporaryVariable' op after all other uses have completed. - *
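// Editor's example (not part of the diff): a hedged sketch of switchCond; at runtime only
// the output matching the value of pred receives the data.
import org.tensorflow.Graph;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.SwitchCond;
import org.tensorflow.types.TFloat32;

class SwitchCondExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      SwitchCond<TFloat32> sw =
          tf.switchCond(tf.constant(new float[] {1f, 2f}), tf.constant(true));
      // data is forwarded to outputTrue() when pred is true, to outputFalse() otherwise.
      System.out.println(sw.outputTrue().shape());  // [2]
      System.out.println(sw.outputFalse().shape()); // [2]
    }
  }
}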

Outputs a ref to the tensor state so it may be read or modified. - *

E.g. - * var = state_ops.temporary_variable([1, 2], types.float) - * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) - * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * Returns a tensor that may be mutated, but only persists within a single step. This is an + * experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT + * USE unless you fully understand the risks. + * + *

It is the caller's responsibility to ensure that 'ref' is eventually passed to a matching + * 'DestroyTemporaryVariable' op after all other uses have completed. + * + *

Outputs a ref to the tensor state so it may be read or modified. + * + *

E.g. var = state_ops.temporary_variable([1, 2], types.float) var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) var = state_ops.assign_add(var, [[6.0, 7.0]]) final = + * state_ops._destroy_temporary_variable(var, var_name=var_name) * * @param data type for {@code ref} output * @param shape The shape of the variable tensor. @@ -6520,14 +6980,13 @@ public SwitchCond switchCond(Operand data, Operand data type for {@code TemporaryVariable} output and operands * @return a new instance of TemporaryVariable */ - public TemporaryVariable temporaryVariable(Shape shape, Class dtype, - TemporaryVariable.Options... options) { + public TemporaryVariable temporaryVariable( + Shape shape, Class dtype, TemporaryVariable.Options... options) { return TemporaryVariable.create(scope, shape, dtype, options); } /** - * An array of Tensors of given size. - * Write data via Write and read via Read or Pack. + * An array of Tensors of given size. Write data via Write and read via Read or Pack. * * @param sizeOutput The size of the array. * @param dtype The type of the elements on the tensor_array. @@ -6535,15 +6994,14 @@ public TemporaryVariable temporaryVariable(Shape shape, Cla * @param data type for {@code TensorArrayV3} output and operands * @return a new instance of TensorArray */ - public TensorArray tensorArray(Operand sizeOutput, Class dtype, - TensorArray.Options... options) { + public TensorArray tensorArray( + Operand sizeOutput, Class dtype, TensorArray.Options... options) { return TensorArray.create(scope, sizeOutput, dtype, options); } /** - * Delete the TensorArray from its resource container. - * This enables the user to close and release the resource in the middle - * of a step/run. + * Delete the TensorArray from its resource container. This enables the user to close and release + * the resource in the middle of a step/run. * * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). * @return a new instance of TensorArrayClose @@ -6553,14 +7011,18 @@ public TensorArrayClose tensorArrayClose(Operand handle) { } /** - * Concat the elements from the TensorArray into value {@code value}. - * Takes {@code T} elements of shapes - *
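// Editor's example (not part of the diff): a hedged sketch of the TensorArray write/read
// flow chaining described above; the flow scalar orders the operations.
import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.TensorArray;
import org.tensorflow.types.TFloat32;

class TensorArrayExample {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      Ops tf = Ops.create(g);
      TensorArray ta = tf.tensorArray(tf.constant(2), TFloat32.class);
      // Each write returns a new flow value that later reads must consume.
      Operand<TFloat32> flow =
          tf.tensorArrayWrite(ta.handle(), tf.constant(0), tf.constant(new float[] {1f, 2f}), ta.flow());
      Operand<TFloat32> read = tf.tensorArrayRead(ta.handle(), tf.constant(0), flow, TFloat32.class);
      System.out.println(read.shape()); // element shapes are dynamic, so unknown statically
    }
  }
}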

+   * Concat the elements from the TensorArray into value {@code value}. Takes {@code T} elements of
+   * shapes
+   *
+   * 
    *  (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
    *  
- *

and concatenates them into a Tensor of shape: - *

{@code (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)} - *

All elements must have the same shape (excepting the first dimension). + * + *

and concatenates them into a Tensor of shape: + * + *

{@code (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)} + * + *

All elements must have the same shape (excepting the first dimension). * * @param data type for {@code value} output * @param handle The handle to a TensorArray. @@ -6570,14 +7032,17 @@ public TensorArrayClose tensorArrayClose(Operand handle) { * @param data type for {@code TensorArrayConcatV3} output and operands * @return a new instance of TensorArrayConcat */ - public TensorArrayConcat tensorArrayConcat(Operand handle, - Operand flowIn, Class dtype, TensorArrayConcat.Options... options) { + public TensorArrayConcat tensorArrayConcat( + Operand handle, + Operand flowIn, + Class dtype, + TensorArrayConcat.Options... options) { return TensorArrayConcat.create(scope, handle, flowIn, dtype, options); } /** - * Gather specific elements from the TensorArray into output {@code value}. - * All elements selected by {@code indices} must have the same shape. + * Gather specific elements from the TensorArray into output {@code value}. All elements selected + * by {@code indices} must have the same shape. * * @param data type for {@code value} output * @param handle The handle to a TensorArray. @@ -6588,72 +7053,80 @@ public TensorArrayConcat tensorArrayConcat(Operand data type for {@code TensorArrayGatherV3} output and operands * @return a new instance of TensorArrayGather */ - public TensorArrayGather tensorArrayGather(Operand handle, - Operand indices, Operand flowIn, Class dtype, + public TensorArrayGather tensorArrayGather( + Operand handle, + Operand indices, + Operand flowIn, + Class dtype, TensorArrayGather.Options... options) { return TensorArrayGather.create(scope, handle, indices, flowIn, dtype, options); } /** - * Creates a TensorArray for storing the gradients of values in the given handle. - * If the given TensorArray gradient already exists, returns a reference to it. - *

Locks the size of the original TensorArray by disabling its dynamic size flag. - *

A note about the input flow_in: - *

The handle flow_in forces the execution of the gradient lookup to occur - * only after certain other operations have occurred. For example, when - * the forward TensorArray is dynamically sized, writes to this TensorArray - * may resize the object. The gradient TensorArray is statically sized based - * on the size of the forward TensorArray when this operation executes. - * Furthermore, the size of the forward TensorArray is frozen by this call. - * As a result, the flow is used to ensure that the call to generate the gradient - * TensorArray only happens after all writes are executed. - *

In the case of dynamically sized TensorArrays, gradient computation should - * only be performed on read operations that have themselves been chained via - * flow to occur only after all writes have executed. That way the final size - * of the forward TensorArray is known when this operation is called. - *

A note about the source attribute: - *

TensorArray gradient calls use an accumulator TensorArray object. If - * multiple gradients are calculated and run in the same session, the multiple - * gradient nodes may accidentally flow through the same accumulator TensorArray. - * This double counts and generally breaks the TensorArray gradient flow. - *

The solution is to identify which gradient call this particular - * TensorArray gradient is being called in. This is performed by identifying - * a unique string (e.g. "gradients", "gradients_1", ...) from the input - * gradient Tensor's name. This string is used as a suffix when creating - * the TensorArray gradient object here (the attribute {@code source}). - *

The attribute {@code source} is added as a suffix to the forward TensorArray's - * name when performing the creation / lookup, so that each separate gradient - * calculation gets its own TensorArray accumulator. + * Creates a TensorArray for storing the gradients of values in the given handle. If the given + * TensorArray gradient already exists, returns a reference to it. + * + *

Locks the size of the original TensorArray by disabling its dynamic size flag. + * + *

A note about the input flow_in: + * + *

The handle flow_in forces the execution of the gradient lookup to occur only after certain + * other operations have occurred. For example, when the forward TensorArray is dynamically sized, + * writes to this TensorArray may resize the object. The gradient TensorArray is statically sized + * based on the size of the forward TensorArray when this operation executes. Furthermore, the + * size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure + * that the call to generate the gradient TensorArray only happens after all writes are executed. + * + *

In the case of dynamically sized TensorArrays, gradient computation should only be performed + * on read operations that have themselves been chained via flow to occur only after all writes + * have executed. That way the final size of the forward TensorArray is known when this operation + * is called. + * + *

A note about the source attribute: + * + *

TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are + * calculated and run in the same session, the multiple gradient nodes may accidentally flow + * through the same accumulator TensorArray. This double counts and generally breaks the + * TensorArray gradient flow. + * + *

The solution is to identify which gradient call this particular TensorArray gradient is + * being called in. This is performed by identifying a unique string (e.g. "gradients", + * "gradients_1", ...) from the input gradient Tensor's name. This string is used as a + * suffix when creating the TensorArray gradient object here (the attribute {@code source}). + * + *

The attribute {@code source} is added as a suffix to the forward TensorArray's name when + * performing the creation / lookup, so that each separate gradient calculation gets its own + * TensorArray accumulator. * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. + * @param source The gradient source string, used to decide which gradient TensorArray to return. * @return a new instance of TensorArrayGrad */ - public TensorArrayGrad tensorArrayGrad(Operand handle, Operand flowIn, - String source) { + public TensorArrayGrad tensorArrayGrad( + Operand handle, Operand flowIn, String source) { return TensorArrayGrad.create(scope, handle, flowIn, source); } /** - * Creates a TensorArray for storing multiple gradients of values in the given handle. - * Similar to TensorArrayGradV3. However it creates an accumulator with an - * expanded shape compared to the input TensorArray whose gradient is being - * computed. This enables multiple gradients for the same TensorArray to be - * calculated using the same accumulator. + * Creates a TensorArray for storing multiple gradients of values in the given handle. Similar to + * TensorArrayGradV3. However it creates an accumulator with an expanded shape compared to the + * input TensorArray whose gradient is being computed. This enables multiple gradients for the + * same TensorArray to be calculated using the same accumulator. * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. - * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient accumulator will - * have shape which is this shape_to_prepend value concatenated with shape of the - * elements in the TensorArray corresponding to the input handle. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. + * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient + * accumulator will have shape which is this shape_to_prepend value concatenated with shape of + * the elements in the TensorArray corresponding to the input handle. + * @param source The gradient source string, used to decide which gradient TensorArray to return. * @return a new instance of TensorArrayGradWithShape */ - public TensorArrayGradWithShape tensorArrayGradWithShape(Operand handle, - Operand flowIn, Operand shapeToPrepend, String source) { + public TensorArrayGradWithShape tensorArrayGradWithShape( + Operand handle, + Operand flowIn, + Operand shapeToPrepend, + String source) { return TensorArrayGradWithShape.create(scope, handle, flowIn, shapeToPrepend, source); } @@ -6668,8 +7141,11 @@ public TensorArrayGradWithShape tensorArrayGradWithShape(Operand data type for {@code TensorArrayPack} output and operands * @return a new instance of TensorArrayPack */ - public TensorArrayPack tensorArrayPack(Operand handle, - Operand flowIn, Class dtype, TensorArrayPack.Options... options) { + public TensorArrayPack tensorArrayPack( + Operand handle, + Operand flowIn, + Class dtype, + TensorArrayPack.Options... 
options) { return TensorArrayPack.create(scope, handle, flowIn, dtype, options); } @@ -6684,14 +7160,17 @@ public TensorArrayPack tensorArrayPack(Operand han * @param data type for {@code TensorArrayReadV3} output and operands * @return a new instance of TensorArrayRead */ - public TensorArrayRead tensorArrayRead(Operand handle, - Operand index, Operand flowIn, Class dtype) { + public TensorArrayRead tensorArrayRead( + Operand handle, + Operand index, + Operand flowIn, + Class dtype) { return TensorArrayRead.create(scope, handle, index, flowIn, dtype); } /** - * Scatter the data from the input value into specific TensorArray elements. - * {@code indices} must be a vector, its length must match the first dim of {@code value}. + * Scatter the data from the input value into specific TensorArray elements. {@code indices} must + * be a vector, its length must match the first dim of {@code value}. * * @param handle The handle to a TensorArray. * @param indices The locations at which to write the tensor elements. @@ -6699,8 +7178,11 @@ public TensorArrayRead tensorArrayRead(Operand handle, - Operand indices, Operand value, Operand flowIn) { + public TensorArrayScatter tensorArrayScatter( + Operand handle, + Operand indices, + Operand value, + Operand flowIn) { return TensorArrayScatter.create(scope, handle, indices, value, flowIn); } @@ -6711,32 +7193,42 @@ public TensorArrayScatter tensorArrayScatter(Operand handle, * @param flowIn A float scalar that enforces proper chaining of operations. * @return a new instance of TensorArraySize */ - public TensorArraySize tensorArraySize(Operand handle, - Operand flowIn) { + public TensorArraySize tensorArraySize( + Operand handle, Operand flowIn) { return TensorArraySize.create(scope, handle, flowIn); } /** - * Split the data from the input value into TensorArray elements. - * Assuming that {@code lengths} takes on values - *

-   *  <p>{@code (n0, n1, ..., n(T-1))}
-   *  <p>and that {@code value} has shape
-   *  <p>{@code (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)},
-   *  <p>this splits values into a TensorArray with T tensors.
-   *  <p>TensorArray index t will be the subtensor of values with starting position
-   *  <p>{@code (n0 + n1 + ... + n(t-1), 0, 0, ...)}
-   *  <p>and having size
-   *  <p>{@code nt x d0 x d1 x ...}
+   * Split the data from the input value into TensorArray elements. Assuming that {@code lengths}
+   * takes on values
+   *
+   * <p>{@code (n0, n1, ..., n(T-1))}
+   *
+   * <p>and that {@code value} has shape
+   *
+   * <p>{@code (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)},
+   *
+   * <p>this splits values into a TensorArray with T tensors.
+   *
+   * <p>TensorArray index t will be the subtensor of values with starting position
+   *
+   * <p>{@code (n0 + n1 + ... + n(t-1), 0, 0, ...)}
+   *
+   * <p>and having size
+   *
+   * <p>

{@code nt x d0 x d1 x ...} * * @param handle The handle to a TensorArray. * @param value The concatenated tensor to write to the TensorArray. - * @param lengths The vector of lengths, how to split the rows of value into the - * TensorArray. + * @param lengths The vector of lengths, how to split the rows of value into the TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @return a new instance of TensorArraySplit */ - public TensorArraySplit tensorArraySplit(Operand handle, - Operand value, Operand lengths, Operand flowIn) { + public TensorArraySplit tensorArraySplit( + Operand handle, + Operand value, + Operand lengths, + Operand flowIn) { return TensorArraySplit.create(scope, handle, value, lengths, flowIn); } @@ -6748,8 +7240,8 @@ public TensorArraySplit tensorArraySplit(Operand handle, * @param flowIn the flowIn value * @return a new instance of TensorArrayUnpack */ - public TensorArrayUnpack tensorArrayUnpack(Operand handle, - Operand value, Operand flowIn) { + public TensorArrayUnpack tensorArrayUnpack( + Operand handle, Operand value, Operand flowIn) { return TensorArrayUnpack.create(scope, handle, value, flowIn); } @@ -6762,23 +7254,24 @@ public TensorArrayUnpack tensorArrayUnpack(Operand handle, * @param flowIn A float scalar that enforces proper chaining of operations. * @return a new instance of TensorArrayWrite */ - public TensorArrayWrite tensorArrayWrite(Operand handle, Operand index, - Operand value, Operand flowIn) { + public TensorArrayWrite tensorArrayWrite( + Operand handle, + Operand index, + Operand value, + Operand flowIn) { return TensorArrayWrite.create(scope, handle, index, value, flowIn); } /** - * Concats all tensors in the list along the 0th dimension. - * Requires that all tensors have the same shape except the first dimension. - *
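The split semantics above are easier to see with concrete lengths. A minimal Java sketch against this API (untested; it assumes the generated TensorArray wrapper exposes handle() and flow() accessors, as the other generated ops here do):

import org.tensorflow.Graph;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.TensorArray;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt64;

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  // A TensorArray that will receive T = 2 elements.
  TensorArray ta = tf.tensorArray(tf.constant(2), TFloat32.class);
  // value has first dimension n0 + n1 = 3 and lengths = (2, 1),
  // so element 0 becomes [1, 2] and element 1 becomes [3].
  Operand<TFloat32> value = tf.constant(new float[] {1f, 2f, 3f});
  Operand<TInt64> lengths = tf.constant(new long[] {2L, 1L});
  tf.tensorArraySplit(ta.handle(), value, lengths, ta.flow());
}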

input_handle: The input list. - * element_shape: The shape of the uninitialized elements in the list. If the first - * dimension is not -1, it is assumed that all list elements have the same - * leading dim. - * leading_dims: The list of leading dims of uninitialized list elements. Used if - * the leading dim of input_handle.element_shape or the element_shape input arg - * is not already set. - * tensor: The concated result. - * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + * Concats all tensors in the list along the 0th dimension. Requires that all tensors have the + * same shape except the first dimension. + * + *

input_handle: The input list. element_shape: The shape of the uninitialized elements in the + * list. If the first dimension is not -1, it is assumed that all list elements have the same + * leading dim. leading_dims: The list of leading dims of uninitialized list elements. Used if the + * leading dim of input_handle.element_shape or the element_shape input arg is not already set. + * tensor: The concated result. lengths: Output tensor containing sizes of the 0th dimension of + * tensors in the list, used for computing the gradient. * * @param data type for {@code tensor} output * @param inputHandle the inputHandle value @@ -6789,8 +7282,10 @@ public TensorArrayWrite tensorArrayWrite(Operand handle, Operan * @return a new instance of TensorListConcat */ public TensorListConcat tensorListConcat( - Operand inputHandle, Operand elementShape, - Operand leadingDims, Class elementDtype) { + Operand inputHandle, + Operand elementShape, + Operand leadingDims, + Class elementDtype) { return TensorListConcat.create(scope, inputHandle, elementShape, leadingDims, elementDtype); } @@ -6809,9 +7304,8 @@ public TensorListConcatLists tensorListConcatLists( } /** - * The shape of the elements of the given list, as a tensor. - * input_handle: the list - * element_shape: the shape of elements of the list + * The shape of the elements of the given list, as a tensor. input_handle: the list element_shape: + * the shape of elements of the list * * @param data type for {@code element_shape} output * @param inputHandle the inputHandle value @@ -6825,27 +7319,26 @@ public TensorListElementShape tensorListElementShape( } /** - * Creates a TensorList which, when stacked, has the value of {@code tensor}. - * Each tensor in the result list corresponds to one row of the input tensor. - *

tensor: The input tensor. - * output_handle: The list. + * Creates a TensorList which, when stacked, has the value of {@code tensor}. Each tensor in the + * result list corresponds to one row of the input tensor. + * + *

tensor: The input tensor. output_handle: The list. * * @param tensor the tensor value * @param elementShape the elementShape value * @return a new instance of TensorListFromTensor */ - public TensorListFromTensor tensorListFromTensor(Operand tensor, - Operand elementShape) { + public TensorListFromTensor tensorListFromTensor( + Operand tensor, Operand elementShape) { return TensorListFromTensor.create(scope, tensor, elementShape); } /** - * Creates a Tensor by indexing into the TensorList. - * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see {@code tf.gather}). - *
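A from-tensor round trip might look as follows (a sketch, untested, under the same imports as the TensorArray example above plus the generated org.tensorflow.op.core.TensorList* classes and org.tensorflow.types.TInt32):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> tensor = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
  // Each row of the (2, 3) input becomes one list element of shape [3].
  TensorListFromTensor list = tf.tensorListFromTensor(tensor, tf.constant(new int[] {3}));
  // Stacking the list reproduces the original (2, 3) tensor.
  TensorListStack<TInt32> stacked =
      tf.tensorListStack(list, tf.constant(new int[] {3}), TInt32.class);
}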

input_handle: The input tensor list. - * indices: The indices used to index into the list. - * values: The tensor. + * Creates a Tensor by indexing into the TensorList. Each row in the produced Tensor corresponds + * to the element in the TensorList specified by the given index (see {@code tf.gather}). + * + *

input_handle: The input tensor list. indices: The indices used to index into the list. + * values: The tensor. * * @param data type for {@code values} output * @param inputHandle the inputHandle value @@ -6856,7 +7349,9 @@ public TensorListFromTensor tensorListFromTensor(Operand tensor * @return a new instance of TensorListGather */ public TensorListGather tensorListGather( - Operand inputHandle, Operand indices, Operand elementShape, + Operand inputHandle, + Operand indices, + Operand elementShape, Class elementDtype) { return TensorListGather.create(scope, inputHandle, indices, elementShape, elementDtype); } @@ -6873,15 +7368,16 @@ public TensorListGather tensorListGather( * @return a new instance of TensorListGetItem */ public TensorListGetItem tensorListGetItem( - Operand inputHandle, Operand index, Operand elementShape, + Operand inputHandle, + Operand index, + Operand elementShape, Class elementDtype) { return TensorListGetItem.create(scope, inputHandle, index, elementShape, elementDtype); } /** - * Returns the number of tensors in the input tensor list. - * input_handle: the input list - * length: the number of tensors in the list + * Returns the number of tensors in the input tensor list. input_handle: the input list length: + * the number of tensors in the list * * @param inputHandle the inputHandle value * @return a new instance of TensorListLength @@ -6891,12 +7387,11 @@ public TensorListLength tensorListLength(Operand inputHandle) { } /** - * Returns the last element of the input list as well as a list with all but that element. - * Fails if the list is empty. - *

input_handle: the input list - * tensor: the withdrawn last element of the list - * element_dtype: the type of elements in the list - * element_shape: the shape of the output tensor + * Returns the last element of the input list as well as a list with all but that element. Fails + * if the list is empty. + * + *

input_handle: the input list tensor: the withdrawn last element of the list element_dtype: + * the type of elements in the list element_shape: the shape of the output tensor * * @param data type for {@code tensor} output * @param inputHandle the inputHandle value @@ -6911,19 +7406,18 @@ public TensorListPopBack tensorListPopBack( } /** - * Returns a list which has the passed-in {@code Tensor} as last element and the other elements of the given list in {@code input_handle}. - * tensor: The tensor to put on the list. - * input_handle: The old list. - * output_handle: A list with the elements of the old list followed by tensor. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * Returns a list which has the passed-in {@code Tensor} as last element and the other elements of + * the given list in {@code input_handle}. tensor: The tensor to put on the list. input_handle: + * The old list. output_handle: A list with the elements of the old list followed by tensor. + * element_dtype: the type of elements in the list. element_shape: a shape compatible with that of + * elements in the list. * * @param inputHandle the inputHandle value * @param tensor the tensor value * @return a new instance of TensorListPushBack */ - public TensorListPushBack tensorListPushBack(Operand inputHandle, - Operand tensor) { + public TensorListPushBack tensorListPushBack( + Operand inputHandle, Operand tensor) { return TensorListPushBack.create(scope, inputHandle, tensor); } @@ -6934,17 +7428,15 @@ public TensorListPushBack tensorListPushBack(Operand inputHandl * @param tensor the tensor value * @return a new instance of TensorListPushBackBatch */ - public TensorListPushBackBatch tensorListPushBackBatch(Operand inputHandles, - Operand tensor) { + public TensorListPushBackBatch tensorListPushBackBatch( + Operand inputHandles, Operand tensor) { return TensorListPushBackBatch.create(scope, inputHandles, tensor); } /** - * List of the given size with empty elements. - * element_shape: the shape of the future elements of the list - * num_elements: the number of elements to reserve - * handle: the output list - * element_dtype: the desired type of elements in the list. + * List of the given size with empty elements. element_shape: the shape of the future elements of + * the list num_elements: the number of elements to reserve handle: the output list element_dtype: + * the desired type of elements in the list. * * @param elementShape the elementShape value * @param numElements the numElements value @@ -6958,31 +7450,26 @@ public TensorListReserve tensorListReserve( } /** - * Resizes the list. - * input_handle: the input list - * size: size of the output list + * Resizes the list. input_handle: the input list size: size of the output list * * @param inputHandle the inputHandle value * @param sizeOutput the sizeOutput value * @return a new instance of TensorListResize */ - public TensorListResize tensorListResize(Operand inputHandle, - Operand sizeOutput) { + public TensorListResize tensorListResize( + Operand inputHandle, Operand sizeOutput) { return TensorListResize.create(scope, inputHandle, sizeOutput); } /** - * Creates a TensorList by indexing into a Tensor. - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see {@code tf.gather}). - *

tensor: The input tensor. - * indices: The indices used to index into the list. - * element_shape: The shape of the elements in the list (can be less specified than - * the shape of the tensor). - * num_elements: The size of the output list. Must be large enough to accommodate - * the largest index in indices. If -1, the list is just large enough to include - * the largest index in indices. - * output_handle: The TensorList. + * Creates a TensorList by indexing into a Tensor. Each member of the TensorList corresponds to + * one row of the input tensor, specified by the given index (see {@code tf.gather}). + * + *

tensor: The input tensor. indices: The indices used to index into the list. element_shape: + * The shape of the elements in the list (can be less specified than the shape of the tensor). + * num_elements: The size of the output list. Must be large enough to accommodate the largest + * index in indices. If -1, the list is just large enough to include the largest index in indices. + * output_handle: The TensorList. * * @param tensor the tensor value * @param indices the indices value @@ -6990,20 +7477,20 @@ public TensorListResize tensorListResize(Operand inputHandle, * @param numElements the numElements value * @return a new instance of TensorListScatter */ - public TensorListScatter tensorListScatter(Operand tensor, - Operand indices, Operand elementShape, + public TensorListScatter tensorListScatter( + Operand tensor, + Operand indices, + Operand elementShape, Operand numElements) { return TensorListScatter.create(scope, tensor, indices, elementShape, numElements); } /** - * Scatters tensor at indices in an input list. - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see {@code tf.gather}). - *

input_handle: The list to scatter into. - * tensor: The input tensor. - * indices: The indices used to index into the list. - * output_handle: The TensorList. + * Scatters tensor at indices in an input list. Each member of the TensorList corresponds to one + * row of the input tensor, specified by the given index (see {@code tf.gather}). + * + *

input_handle: The list to scatter into. tensor: The input tensor. indices: The indices used + * to index into the list. output_handle: The TensorList. * * @param inputHandle the inputHandle value * @param tensor the tensor value @@ -7011,7 +7498,8 @@ public TensorListScatter tensorListScatter(Operand tensor, * @return a new instance of TensorListScatterIntoExistingList */ public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( - Operand inputHandle, Operand tensor, + Operand inputHandle, + Operand tensor, Operand indices) { return TensorListScatterIntoExistingList.create(scope, inputHandle, tensor, indices); } @@ -7024,36 +7512,36 @@ public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( * @param item the item value * @return a new instance of TensorListSetItem */ - public TensorListSetItem tensorListSetItem(Operand inputHandle, - Operand index, Operand item) { + public TensorListSetItem tensorListSetItem( + Operand inputHandle, Operand index, Operand item) { return TensorListSetItem.create(scope, inputHandle, index, item); } /** - * Splits a tensor into a list. - * list[i] corresponds to lengths[i] tensors from the input tensor. - * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - *

tensor: The input tensor. - * element_shape: A shape compatible with that of elements in the tensor. - * lengths: Vector of sizes of the 0th dimension of tensors in the list. - * output_handle: The list. + * Splits a tensor into a list. list[i] corresponds to lengths[i] tensors from the input tensor. + * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + * + *

tensor: The input tensor. element_shape: A shape compatible with that of elements in the + * tensor. lengths: Vector of sizes of the 0th dimension of tensors in the list. output_handle: + * The list. * * @param tensor the tensor value * @param elementShape the elementShape value * @param lengths the lengths value * @return a new instance of TensorListSplit */ - public TensorListSplit tensorListSplit(Operand tensor, - Operand elementShape, Operand lengths) { + public TensorListSplit tensorListSplit( + Operand tensor, + Operand elementShape, + Operand lengths) { return TensorListSplit.create(scope, tensor, elementShape, lengths); } /** - * Stacks all tensors in the list. - * Requires that all tensors have the same shape. - *
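Taken together, the list ops above (reserve, set, stack) compose like this (a sketch, untested, same assumptions as the earlier list example):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  // Reserve a two-element list of [3]-shaped float elements.
  TensorListReserve reserved =
      tf.tensorListReserve(tf.constant(new int[] {3}), tf.constant(2), TFloat32.class);
  TensorListSetItem first =
      tf.tensorListSetItem(reserved, tf.constant(0), tf.constant(new float[] {1f, 2f, 3f}));
  TensorListSetItem second =
      tf.tensorListSetItem(first, tf.constant(1), tf.constant(new float[] {4f, 5f, 6f}));
  // All elements share one shape, so stacking yields a (2, 3) tensor.
  TensorListStack<TFloat32> stacked =
      tf.tensorListStack(second, tf.constant(new int[] {3}), TFloat32.class);
}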

input_handle: the input list - * tensor: the gathered result - * num_elements: optional. If not -1, the number of elements in the list. + * Stacks all tensors in the list. Requires that all tensors have the same shape. + * + *

input_handle: the input list tensor: the gathered result num_elements: optional. If not -1, + * the number of elements in the list. * * @param data type for {@code tensor} output * @param inputHandle the inputHandle value @@ -7063,16 +7551,17 @@ public TensorListSplit tensorListSplit(Operand tensor, * @param data type for {@code TensorListStack} output and operands * @return a new instance of TensorListStack */ - public TensorListStack tensorListStack(Operand inputHandle, - Operand elementShape, Class elementDtype, TensorListStack.Options... options) { + public TensorListStack tensorListStack( + Operand inputHandle, + Operand elementShape, + Class elementDtype, + TensorListStack.Options... options) { return TensorListStack.create(scope, inputHandle, elementShape, elementDtype, options); } /** - * Returns a tensor map with item from given key erased. - * input_handle: the original map - * output_handle: the map with value from given key removed - * key: the key of the value to be erased + * Returns a tensor map with item from given key erased. input_handle: the original map + * output_handle: the map with value from given key removed key: the key of the value to be erased * * @param inputHandle the inputHandle value * @param key the key value @@ -7080,48 +7569,44 @@ public TensorListStack tensorListStack(Operand data type for {@code TensorMapErase} output and operands * @return a new instance of TensorMapErase */ - public TensorMapErase tensorMapErase(Operand inputHandle, - Operand key, Class valueDtype) { + public TensorMapErase tensorMapErase( + Operand inputHandle, Operand key, Class valueDtype) { return TensorMapErase.create(scope, inputHandle, key, valueDtype); } /** - * Returns whether the given key exists in the map. - * input_handle: the input map - * key: the key to check - * has_key: whether the key is already in the map or not + * Returns whether the given key exists in the map. input_handle: the input map key: the key to + * check has_key: whether the key is already in the map or not * * @param inputHandle the inputHandle value * @param key the key value * @return a new instance of TensorMapHasKey */ - public TensorMapHasKey tensorMapHasKey(Operand inputHandle, - Operand key) { + public TensorMapHasKey tensorMapHasKey( + Operand inputHandle, Operand key) { return TensorMapHasKey.create(scope, inputHandle, key); } /** - * Returns a map that is the 'input_handle' with the given key-value pair inserted. - * input_handle: the original map - * output_handle: the map with key and value inserted - * key: the key to be inserted - * value: the value to be inserted + * Returns a map that is the 'input_handle' with the given key-value pair inserted. input_handle: + * the original map output_handle: the map with key and value inserted key: the key to be inserted + * value: the value to be inserted * * @param inputHandle the inputHandle value * @param key the key value * @param value the value value * @return a new instance of TensorMapInsert */ - public TensorMapInsert tensorMapInsert(Operand inputHandle, - Operand key, Operand value) { + public TensorMapInsert tensorMapInsert( + Operand inputHandle, + Operand key, + Operand value) { return TensorMapInsert.create(scope, inputHandle, key, value); } /** - * Returns the value from a given key in a tensor map. - * input_handle: the input map - * key: the key to be looked up - * value: the value found from the given key + * Returns the value from a given key in a tensor map. 
input_handle: the input map key: the key to + * be looked up value: the value found from the given key * * @param data type for {@code value} output * @param inputHandle the inputHandle value @@ -7130,15 +7615,14 @@ public TensorMapInsert tensorMapInsert(Operand inputHandle, * @param data type for {@code TensorMapLookup} output and operands * @return a new instance of TensorMapLookup */ - public TensorMapLookup tensorMapLookup(Operand inputHandle, - Operand key, Class valueDtype) { + public TensorMapLookup tensorMapLookup( + Operand inputHandle, Operand key, Class valueDtype) { return TensorMapLookup.create(scope, inputHandle, key, valueDtype); } /** - * Returns the number of tensors in the input tensor map. - * input_handle: the input map - * size: the number of tensors in the map + * Returns the number of tensors in the input tensor map. input_handle: the input map size: the + * number of tensors in the map * * @param inputHandle the inputHandle value * @return a new instance of TensorMapSize @@ -7148,9 +7632,8 @@ public TensorMapSize tensorMapSize(Operand inputHandle) { } /** - * Returns a Tensor stack of all keys in a tensor map. - * input_handle: the input map - * keys: the returned Tensor of all keys in the map + * Returns a Tensor stack of all keys in a tensor map. input_handle: the input map keys: the + * returned Tensor of all keys in the map * * @param data type for {@code keys} output * @param inputHandle the inputHandle value @@ -7164,45 +7647,55 @@ public TensorMapStackKeys tensorMapStackKeys( } /** - * Adds sparse {@code updates} to an existing tensor according to {@code indices}. - * This operation creates a new tensor by adding sparse {@code updates} to the passed - * in {@code tensor}. - * This operation is very similar to {@code tf.compat.v1.scatter_nd_add}, except that the updates - * are added onto an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *
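The map ops above compose the same way; a sketch (untested, and assuming this version of Ops also exposes the generated emptyTensorMap() wrapper):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> key = tf.constant(1);
  Operand<TFloat32> value = tf.constant(new float[] {1f, 2f});
  // Insert one entry, then look it up and query the size (which is then 1).
  TensorMapInsert withEntry = tf.tensorMapInsert(tf.emptyTensorMap(), key, value);
  TensorMapLookup<TFloat32> found = tf.tensorMapLookup(withEntry, key, TFloat32.class);
  TensorMapSize size = tf.tensorMapSize(withEntry);
}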

-   *  <p>{@code indices} is an integer tensor containing indices into a new tensor of shape
-   *  {@code tensor.shape}. The last dimension of {@code indices} can be at most the rank of
-   *  {@code tensor.shape}:
-   *  <pre>

+   * Adds sparse {@code updates} to an existing tensor according to {@code indices}. This operation
+   * creates a new tensor by adding sparse {@code updates} to the passed in {@code tensor}. This
+   * operation is very similar to {@code tf.compat.v1.scatter_nd_add}, except that the updates are
+   * added onto an existing tensor (as opposed to a variable). If the memory for the existing tensor
+   * cannot be re-used, a copy is made and updated.
+   *
+   * <p>{@code indices} is an integer tensor containing indices into a new tensor of shape {@code
+   * tensor.shape}. The last dimension of {@code indices} can be at most the rank of {@code
+   * tensor.shape}:
+   *
+   * <pre>

    *  indices.shape[-1] <= tensor.shape.rank
    *  
-   *  <p>The last dimension of {@code indices} corresponds to indices into elements
-   *  (if {@code indices.shape[-1] = tensor.shape.rank}) or slices
-   *  (if {@code indices.shape[-1] < tensor.shape.rank}) along dimension
-   *  {@code indices.shape[-1]} of {@code tensor.shape}. {@code updates} is a tensor with shape
-   *  <pre>
+   * <p>The last dimension of {@code indices} corresponds to indices into elements (if {@code
+   * indices.shape[-1] = tensor.shape.rank}) or slices (if {@code indices.shape[-1] <
+   * tensor.shape.rank}) along dimension {@code indices.shape[-1]} of {@code tensor.shape}. {@code
+   * updates} is a tensor with shape
+   *
+   * <pre>

    *  indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
    *  
-   *  <p>The simplest form of tensor_scatter_add is to add individual elements to a
-   *  tensor by index. For example, say we want to add 4 elements in a rank-1
-   *  tensor with 8 elements.
-   *  <p>In Python, this scatter add operation would look like this:
-   *  <pre>
+   * <p>The simplest form of tensor_scatter_add is to add individual elements to a tensor by index.
+   * For example, say we want to add 4 elements in a rank-1 tensor with 8 elements.
+   *
+   * <p>In Python, this scatter add operation would look like this:
+   *
+   * <pre>

    *      indices = tf.constant([[4], [3], [1], [7]])
    *      updates = tf.constant([9, 10, 11, 12])
    *      tensor = tf.ones([8], dtype=tf.int32)
    *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    *      print(updated)
    *  
-   *  <p>The resulting tensor would look like this:
-   *  <pre>
+   * <p>The resulting tensor would look like this:
+   *
+   * <pre>

    *  [1, 12, 1, 11, 10, 1, 1, 13]
    *  
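The same element-wise example, written against this Java API instead of Python (a minimal sketch, untested; imports as in the earlier sketches):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
  Operand<TInt32> updates = tf.constant(new int[] {9, 10, 11, 12});
  Operand<TInt32> tensor = tf.constant(new int[] {1, 1, 1, 1, 1, 1, 1, 1});
  // When run, `updated` holds [1, 12, 1, 11, 10, 1, 1, 13], as above.
  TensorScatterNdAdd<TInt32> updated = tf.tensorScatterNdAdd(tensor, indices, updates);
}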
-   *  <p>We can also, insert entire slices of a higher rank tensor all at once. For
-   *  example, if we wanted to insert two slices in the first dimension of a
-   *  rank-3 tensor with two matrices of new values.
-   *  <p>In Python, this scatter add operation would look like this:
-   *  <pre>
+   * <p>We can also insert entire slices of a higher rank tensor all at once. For example, if we
+   * wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new
+   * values.
+   *
+   * <p>In Python, this scatter add operation would look like this:
+   *
+   * <pre>

    *      indices = tf.constant([[0], [2]])
    *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    *                              [7, 7, 7, 7], [8, 8, 8, 8]],
@@ -7212,15 +7705,18 @@ public  TensorMapStackKeys tensorMapStackKeys(
    *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    *      print(updated)
    *  
-   *  <p>The resulting tensor would look like this:
-   *  <pre>
+   * <p>The resulting tensor would look like this:
+   *
+   * <pre>

    *  [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
    *   [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
    *   [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
    *   [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
    *  
-   *  <p>Note that on CPU, if an out of bound index is found, an error is returned.
-   *  On GPU, if an out of bound index is found, the index is ignored.
+   *
+   * <p>
Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out + * of bound index is found, the index is ignored. * * @param data type for {@code output} output * @param tensor Tensor to copy/update. @@ -7229,8 +7725,8 @@ public TensorMapStackKeys tensorMapStackKeys( * @param data type for {@code TensorScatterAdd} output and operands * @return a new instance of TensorScatterNdAdd */ - public TensorScatterNdAdd tensorScatterNdAdd(Operand tensor, - Operand indices, Operand updates) { + public TensorScatterNdAdd tensorScatterNdAdd( + Operand tensor, Operand indices, Operand updates) { return TensorScatterNdAdd.create(scope, tensor, indices, updates); } @@ -7244,8 +7740,8 @@ public TensorScatterNdAdd tensorScatterNdAdd(Operand ten * @param data type for {@code TensorScatterMax} output and operands * @return a new instance of TensorScatterNdMax */ - public TensorScatterNdMax tensorScatterNdMax(Operand tensor, - Operand indices, Operand updates) { + public TensorScatterNdMax tensorScatterNdMax( + Operand tensor, Operand indices, Operand updates) { return TensorScatterNdMax.create(scope, tensor, indices, updates); } @@ -7259,50 +7755,60 @@ public TensorScatterNdMax tensorScatterNdMax(Operand ten * @param data type for {@code TensorScatterMin} output and operands * @return a new instance of TensorScatterNdMin */ - public TensorScatterNdMin tensorScatterNdMin(Operand tensor, - Operand indices, Operand updates) { + public TensorScatterNdMin tensorScatterNdMin( + Operand tensor, Operand indices, Operand updates) { return TensorScatterNdMin.create(scope, tensor, indices, updates); } /** - * Subtracts sparse {@code updates} from an existing tensor according to {@code indices}. - * This operation creates a new tensor by subtracting sparse {@code updates} from the - * passed in {@code tensor}. - * This operation is very similar to {@code tf.scatter_nd_sub}, except that the updates - * are subtracted from an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

-   *  <p>{@code indices} is an integer tensor containing indices into a new tensor of shape
-   *  {@code shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}:
-   *  <pre>

+   * Subtracts sparse {@code updates} from an existing tensor according to {@code indices}. This
+   * operation creates a new tensor by subtracting sparse {@code updates} from the passed in {@code
+   * tensor}. This operation is very similar to {@code tf.scatter_nd_sub}, except that the updates
+   * are subtracted from an existing tensor (as opposed to a variable). If the memory for the
+   * existing tensor cannot be re-used, a copy is made and updated.
+   *
+   * <p>{@code indices} is an integer tensor containing indices into a new tensor of shape {@code
+   * shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}:
+   *
+   * <pre>

    *  indices.shape[-1] <= shape.rank
    *  
-   *  <p>The last dimension of {@code indices} corresponds to indices into elements
-   *  (if {@code indices.shape[-1] = shape.rank}) or slices
-   *  (if {@code indices.shape[-1] < shape.rank}) along dimension {@code indices.shape[-1]} of
-   *  {@code shape}. {@code updates} is a tensor with shape
-   *  <pre>
+   * <p>The last dimension of {@code indices} corresponds to indices into elements (if {@code
+   * indices.shape[-1] = shape.rank}) or slices (if {@code indices.shape[-1] < shape.rank}) along
+   * dimension {@code indices.shape[-1]} of {@code shape}. {@code updates} is a tensor with shape
+   *
+   * <pre>

    *  indices.shape[:-1] + shape[indices.shape[-1]:]
    *  
-   *  <p>The simplest form of tensor_scatter_sub is to subtract individual elements
-   *  from a tensor by index. For example, say we want to insert 4 scattered elements
-   *  in a rank-1 tensor with 8 elements.
-   *  <p>In Python, this scatter subtract operation would look like this:
-   *  <pre>
+   * <p>The simplest form of tensor_scatter_sub is to subtract individual elements from a tensor by
+   * index. For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8
+   * elements.
+   *
+   * <p>In Python, this scatter subtract operation would look like this:
+   *
+   * <pre>

    *      indices = tf.constant([[4], [3], [1], [7]])
    *      updates = tf.constant([9, 10, 11, 12])
    *      tensor = tf.ones([8], dtype=tf.int32)
    *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    *      print(updated)
    *  
-   *  <p>The resulting tensor would look like this:
-   *  <pre>
+   * <p>The resulting tensor would look like this:
+   *
+   * <pre>

    *  [1, -10, 1, -9, -8, 1, 1, -11]
    *  
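And the Java counterpart of this subtract example (a sketch, untested):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
  Operand<TInt32> updates = tf.constant(new int[] {9, 10, 11, 12});
  Operand<TInt32> tensor = tf.constant(new int[] {1, 1, 1, 1, 1, 1, 1, 1});
  // When run, `updated` holds [1, -10, 1, -9, -8, 1, 1, -11], as above.
  TensorScatterNdSub<TInt32> updated = tf.tensorScatterNdSub(tensor, indices, updates);
}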
-   *  <p>We can also, insert entire slices of a higher rank tensor all at once. For
-   *  example, if we wanted to insert two slices in the first dimension of a
-   *  rank-3 tensor with two matrices of new values.
-   *  <p>In Python, this scatter add operation would look like this:
-   *  <pre>
+   * <p>We can also insert entire slices of a higher rank tensor all at once. For example, if we
+   * wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new
+   * values.
+   *
+   * <p>In Python, this scatter subtract operation would look like this:
+   *
+   * <pre>

    *      indices = tf.constant([[0], [2]])
    *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    *                              [7, 7, 7, 7], [8, 8, 8, 8]],
@@ -7312,15 +7818,18 @@ public  TensorScatterNdMin tensorScatterNdMin(Operand ten
    *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    *      print(updated)
    *  
- *

The resulting tensor would look like this: - *

+   *
+   * 

The resulting tensor would look like this: + * + *

    *  [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
    *   [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
    *   [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
    *   [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
    *  
-   *  <p>Note that on CPU, if an out of bound index is found, an error is returned.
-   *  On GPU, if an out of bound index is found, the index is ignored.
+   *
+   * <p>
Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out + * of bound index is found, the index is ignored. * * @param data type for {@code output} output * @param tensor Tensor to copy/update. @@ -7329,42 +7838,53 @@ public TensorScatterNdMin tensorScatterNdMin(Operand ten * @param data type for {@code TensorScatterSub} output and operands * @return a new instance of TensorScatterNdSub */ - public TensorScatterNdSub tensorScatterNdSub(Operand tensor, - Operand indices, Operand updates) { + public TensorScatterNdSub tensorScatterNdSub( + Operand tensor, Operand indices, Operand updates) { return TensorScatterNdSub.create(scope, tensor, indices, updates); } /** - * Scatter {@code updates} into an existing tensor according to {@code indices}. - * This operation creates a new tensor by applying sparse {@code updates} to the passed - * in {@code tensor}. - * This operation is very similar to {@code tf.scatter_nd}, except that the updates are - * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

-   *  <p>If {@code indices} contains duplicates, then we pick the last update for the index.
-   *  <p>If an out of bound index is found on CPU, an error is returned.
-   *  <p>WARNING: There are some GPU specific semantics for this operation.
-   *  <ul>
-   *  <li>If an out of bound index is found, the index is ignored.</li>
-   *  <li>The order in which updates are applied is nondeterministic, so the output
-   *  will be nondeterministic if {@code indices} contains duplicates.</li>
-   *  </ul>
-   *  <p>{@code indices} is an integer tensor containing indices into a new tensor of shape
-   *  {@code shape}.
-   *  <ul>
-   *  <li>{@code indices} must have at least 2 axes: {@code (num_updates, index_depth)}.</li>
-   *  <li>The last axis of {@code indices} is how deep to index into {@code tensor} so this index
-   *  depth must be less than the rank of {@code tensor}: {@code indices.shape[-1] <= tensor.ndim}</li>
-   *  </ul>
-   *  <p>if {@code indices.shape[-1] = tensor.rank} this Op indexes and updates scalar elements.
-   *  if {@code indices.shape[-1] < tensor.rank} it indexes and updates slices of the input
-   *  {@code tensor}.
-   *  <p>Each {@code update} has a rank of {@code tensor.rank - indices.shape[-1]}.
-   *  The overall shape of {@code updates} is:
-   *  <pre>

+   * Scatter {@code updates} into an existing tensor according to {@code indices}. This operation
+   * creates a new tensor by applying sparse {@code updates} to the passed in {@code tensor}. This
+   * operation is very similar to {@code tf.scatter_nd}, except that the updates are scattered onto
+   * an existing tensor (as opposed to a zero-tensor). If the memory for the existing tensor cannot
+   * be re-used, a copy is made and updated.
+   *
+   * <p>If {@code indices} contains duplicates, then we pick the last update for the index.
+   *
+   * <p>If an out of bound index is found on CPU, an error is returned.
+   *
+   * <p>WARNING: There are some GPU specific semantics for this operation.
+   *
+   * <ul>
+   *   <li>If an out of bound index is found, the index is ignored.
+   *   <li>The order in which updates are applied is nondeterministic, so the output will be
+   *       nondeterministic if {@code indices} contains duplicates.
+   * </ul>
+   *
+   * <p>{@code indices} is an integer tensor containing indices into a new tensor of shape {@code
+   * shape}.
+   *
+   * <ul>
+   *   <li>{@code indices} must have at least 2 axes: {@code (num_updates, index_depth)}.
+   *   <li>The last axis of {@code indices} is how deep to index into {@code tensor} so this index
+   *       depth must be less than the rank of {@code tensor}: {@code indices.shape[-1] <=
+   *       tensor.ndim}
+   * </ul>
+   *
+   * <p>if {@code indices.shape[-1] = tensor.rank} this Op indexes and updates scalar elements. if
+   * {@code indices.shape[-1] < tensor.rank} it indexes and updates slices of the input {@code
+   * tensor}.
+   *
+   * <p>Each {@code update} has a rank of {@code tensor.rank - indices.shape[-1]}. The overall shape
+   * of {@code updates} is:
+   *
+   * <pre>

    *  indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
    *  
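Since the javadoc defers to the Python documentation for usage, a Java sketch of the same pattern may help (untested; note this op replaces rather than accumulates):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> tensor = tf.constant(new int[] {1, 1, 1, 1, 1, 1, 1, 1});
  Operand<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
  Operand<TInt32> updates = tf.constant(new int[] {9, 10, 11, 12});
  // When run, `updated` holds [1, 11, 1, 10, 9, 1, 1, 12]: the four indexed
  // elements are overwritten instead of added to.
  TensorScatterNdUpdate<TInt32> updated = tf.tensorScatterNdUpdate(tensor, indices, updates);
}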
-   *  <p>For usage examples see the python tf.tensor_scatter_nd_update {@link
-   *  org.tensorflow.op.Ops#tensorScatterNdUpdate} function
+   *
+   * <p>
For usage examples see the python tf.tensor_scatter_nd_update {@link + * org.tensorflow.op.Ops#tensorScatterNdUpdate} function * * @param data type for {@code output} output * @param tensor Tensor to copy/update. @@ -7373,18 +7893,19 @@ public TensorScatterNdSub tensorScatterNdSub(Operand ten * @param data type for {@code TensorScatterUpdate} output and operands * @return a new instance of TensorScatterNdUpdate */ - public TensorScatterNdUpdate tensorScatterNdUpdate(Operand tensor, - Operand indices, Operand updates) { + public TensorScatterNdUpdate tensorScatterNdUpdate( + Operand tensor, Operand indices, Operand updates) { return TensorScatterNdUpdate.create(scope, tensor, indices, updates); } /** - * Assign {@code value} to the sliced l-value reference of {@code input}. - * The values of {@code value} are assigned to the positions in the tensor {@code input} that - * are selected by the slice parameters. The slice parameters {@code begin} {@code end} - * {@code strides} etc. work exactly as in {@code StridedSlice}. - *

NOTE this op currently does not support broadcasting and so {@code value}'s shape - * must be exactly the shape produced by the slice of {@code input}. + * Assign {@code value} to the sliced l-value reference of {@code input}. The values of {@code + * value} are assigned to the positions in the tensor {@code input} that are selected by the slice + * parameters. The slice parameters {@code begin} {@code end} {@code strides} etc. work exactly as + * in {@code StridedSlice}. + * + *

NOTE this op currently does not support broadcasting and so {@code value}'s shape must be + * exactly the shape produced by the slice of {@code input}. * * @param data type for {@code output} output * @param input the input value @@ -7398,44 +7919,41 @@ public TensorScatterNdUpdate tensorScatterNdUpdate(Operand< * @return a new instance of TensorStridedSliceUpdate */ public TensorStridedSliceUpdate tensorStridedSliceUpdate( - Operand input, Operand begin, Operand end, Operand strides, Operand value, + Operand input, + Operand begin, + Operand end, + Operand strides, + Operand value, TensorStridedSliceUpdate.Options... options) { return TensorStridedSliceUpdate.create(scope, input, begin, end, strides, value, options); } /** - * Constructs a tensor by tiling a given tensor. - * This operation creates a new tensor by replicating {@code input} {@code multiples} times. - * The output tensor's i'th dimension has {@code input.dims(i) * multiples[i]} elements, - * and the values of {@code input} are replicated {@code multiples[i]} times along the 'i'th - * dimension. For example, tiling {@code [a b c d]} by {@code [2]} produces - * {@code [a b c d a b c d]}. - *

- *
- *
- *

a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - * b = tf.constant([1,2], tf.int32) - * tf.tile(a, b) - * <tf.Tensor: shape=(2, 6), dtype=int32, numpy= - * array([[1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6]], dtype=int32)> - * c = tf.constant([2,1], tf.int32) - * tf.tile(a, c) - * <tf.Tensor: shape=(4, 3), dtype=int32, numpy= - * array([[1, 2, 3], - * [4, 5, 6], - * [1, 2, 3], - * [4, 5, 6]], dtype=int32)> - * d = tf.constant([2,2], tf.int32) - * tf.tile(a, d) - * <tf.Tensor: shape=(4, 6), dtype=int32, numpy= - * array([[1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6], - * [1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6]], dtype=int32)> - *

- *
- *
+ * Constructs a tensor by tiling a given tensor. This operation creates a new tensor by + * replicating {@code input} {@code multiples} times. The output tensor's i'th dimension has + * {@code input.dims(i) * multiples[i]} elements, and the values of {@code input} are replicated + * {@code multiples[i]} times along the 'i'th dimension. For example, tiling {@code [a b c d]} by + * {@code [2]} produces {@code [a b c d a b c d]}. + * + *
+ * + *
+ * + *
+ * + *

a = tf.constant([[1,2,3],[4,5,6]], tf.int32) b = tf.constant([1,2], tf.int32) tf.tile(a, b) + * <tf.Tensor: shape=(2, 6), dtype=int32, numpy= array([[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, + * 6]], dtype=int32)> c = tf.constant([2,1], tf.int32) tf.tile(a, c) <tf.Tensor: shape=(4, + * 3), dtype=int32, numpy= array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]], dtype=int32)> d + * = tf.constant([2,2], tf.int32) tf.tile(a, d) <tf.Tensor: shape=(4, 6), dtype=int32, numpy= + * array([[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6], [1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]], + * dtype=int32)> + * + *
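The first of those Python calls corresponds to the following Java (a sketch, untested):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> a = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
  // Tiling by [1, 2] repeats each row twice along axis 1: shape (2, 6), as above.
  Tile<TInt32> tiled = tf.tile(a, tf.constant(new int[] {1, 2}));
}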

+ * + *
+ * + *
* * @param data type for {@code output} output * @param input 1-D or higher. @@ -7448,10 +7966,10 @@ public Tile tile(Operand input, OperandNote: the timestamp is computed when the op is executed, not when it is added - * to the graph. + * Provides the time since epoch in seconds. Returns the timestamp as a {@code float64} for + * seconds since the Unix epoch. + * + *

Note: the timestamp is computed when the op is executed, not when it is added to the graph. * * @return a new instance of Timestamp */ @@ -7460,19 +7978,15 @@ public Timestamp timestamp() { } /** - * Returns the TopK unique values in the array in sorted order. - * The running time is proportional to the product of K and the input - * size. Sorting the whole array is more efficient for sufficiently large - * values of K. The median-of-medians algorithm is probably faster, but - * difficult to implement efficiently in XLA. If there are fewer than K - * unique numbers (not NANs), the results are padded with negative - * infinity. NaNs are never returned. Subnormal numbers are flushed to - * zero. If an element appears at multiple indices, the highest index is - * returned. If a TopK element never appears in the input due to padding - * values, the indices are padded with negative one. If a padding value - * appears in the input and padding is needed, the highest index of the - * padding value will be returned. The semantics are not the same as - * kth_order_statistic. + * Returns the TopK unique values in the array in sorted order. The running time is proportional + * to the product of K and the input size. Sorting the whole array is more efficient for + * sufficiently large values of K. The median-of-medians algorithm is probably faster, but + * difficult to implement efficiently in XLA. If there are fewer than K unique numbers (not NANs), + * the results are padded with negative infinity. NaNs are never returned. Subnormal numbers are + * flushed to zero. If an element appears at multiple indices, the highest index is returned. If a + * TopK element never appears in the input due to padding values, the indices are padded with + * negative one. If a padding value appears in the input and padding is needed, the highest index + * of the padding value will be returned. The semantics are not the same as kth_order_statistic. * * @param input the input value * @param k the value of the k property @@ -7483,12 +7997,10 @@ public TopKUnique topKUnique(Operand input, Long k) { } /** - * Returns the TopK values in the array in sorted order. - * This is a combination of MakeUnique and TopKUnique. The returned top-K will - * have its lower bits replaced by iota, thus it will be close to the original - * value but not exactly the same. The running time is proportional to the product - * of K and the input size. NaNs are never returned. Subnormal numbers are flushed - * to zero. + * Returns the TopK values in the array in sorted order. This is a combination of MakeUnique and + * TopKUnique. The returned top-K will have its lower bits replaced by iota, thus it will be close + * to the original value but not exactly the same. The running time is proportional to the product + * of K and the input size. NaNs are never returned. Subnormal numbers are flushed to zero. * * @param input the input value * @param k the value of the k property @@ -7499,24 +8011,20 @@ public TopKWithUnique topKWithUnique(Operand input, Long k) { } /** - * Reverses the operation of Batch for a single output Tensor. - * An instance of Unbatch either receives an empty batched_tensor, in which case it - * asynchronously waits until the values become available from a concurrently - * running instance of Unbatch with the same container and shared_name, or receives - * a non-empty batched_tensor in which case it finalizes all other concurrently - * running instances and outputs its own element from the batch. - *

batched_tensor: The possibly transformed output of Batch. The size of the first - * dimension should remain unchanged by the transformations for the operation to - * work. - * batch_index: The matching batch_index obtained from Batch. - * id: The id scalar emitted by Batch. - * unbatched_tensor: The Tensor corresponding to this execution. - * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the - * batched input tensor associated with a given invocation of the op. - * container: Container to control resource sharing. - * shared_name: Instances of Unbatch with the same container and shared_name are - * assumed to possibly belong to the same batch. If left empty, the op name will - * be used as the shared name. + * Reverses the operation of Batch for a single output Tensor. An instance of Unbatch either + * receives an empty batched_tensor, in which case it asynchronously waits until the values become + * available from a concurrently running instance of Unbatch with the same container and + * shared_name, or receives a non-empty batched_tensor in which case it finalizes all other + * concurrently running instances and outputs its own element from the batch. + * + *

batched_tensor: The possibly transformed output of Batch. The size of the first dimension + * should remain unchanged by the transformations for the operation to work. batch_index: The + * matching batch_index obtained from Batch. id: The id scalar emitted by Batch. unbatched_tensor: + * The Tensor corresponding to this execution. timeout_micros: Maximum amount of time (in + * microseconds) to wait to receive the batched input tensor associated with a given invocation of + * the op. container: Container to control resource sharing. shared_name: Instances of Unbatch + * with the same container and shared_name are assumed to possibly belong to the same batch. If + * left empty, the op name will be used as the shared name. * * @param data type for {@code unbatched_tensor} output * @param batchedTensor the batchedTensor value @@ -7527,26 +8035,26 @@ public TopKWithUnique topKWithUnique(Operand input, Long k) { * @param data type for {@code Unbatch} output and operands * @return a new instance of Unbatch */ - public Unbatch unbatch(Operand batchedTensor, Operand batchIndex, - Operand id, Long timeoutMicros, Unbatch.Options... options) { + public Unbatch unbatch( + Operand batchedTensor, + Operand batchIndex, + Operand id, + Long timeoutMicros, + Unbatch.Options... options) { return Unbatch.create(scope, batchedTensor, batchIndex, id, timeoutMicros, options); } /** - * Gradient of Unbatch. - * Acts like Batch but using the given batch_index index of batching things as they - * become available. This ensures that the gradients are propagated back in the - * same session which did the forward pass. - *

original_input: The input to the Unbatch operation this is the gradient of. - * batch_index: The batch_index given to the Unbatch operation this is the gradient - * of. - * grad: The downstream gradient. - * id: The id scalar emitted by Batch. - * batched_grad: The return value, either an empty tensor or the batched gradient. - * container: Container to control resource sharing. - * shared_name: Instances of UnbatchGrad with the same container and shared_name - * are assumed to possibly belong to the same batch. If left empty, the op name - * will be used as the shared name. + * Gradient of Unbatch. Acts like Batch but using the given batch_index index of batching things + * as they become available. This ensures that the gradients are propagated back in the same + * session which did the forward pass. + * + *

original_input: The input to the Unbatch operation this is the gradient of. batch_index: The + * batch_index given to the Unbatch operation this is the gradient of. grad: The downstream + * gradient. id: The id scalar emitted by Batch. batched_grad: The return value, either an empty + * tensor or the batched gradient. container: Container to control resource sharing. shared_name: + * Instances of UnbatchGrad with the same container and shared_name are assumed to possibly belong + * to the same batch. If left empty, the op name will be used as the shared name. * * @param data type for {@code batched_grad} output * @param originalInput the originalInput value @@ -7557,31 +8065,37 @@ public Unbatch unbatch(Operand batchedTensor, Operand data type for {@code UnbatchGrad} output and operands * @return a new instance of UnbatchGrad */ - public UnbatchGrad unbatchGrad(Operand originalInput, - Operand batchIndex, Operand grad, Operand id, + public UnbatchGrad unbatchGrad( + Operand originalInput, + Operand batchIndex, + Operand grad, + Operand id, UnbatchGrad.Options... options) { return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); } /** - * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor {@code y} containing unique elements - * along the {@code axis} of a tensor. The returned unique elements is sorted - * in the same order as they occur along {@code axis} in {@code x}. - * This operation also returns a tensor {@code idx} that is the same size as - * the number of the elements in {@code x} along the {@code axis} dimension. It - * contains the index in the unique output {@code y}. - * In other words, for an {@code 1-D} tensor {@code x} with `axis = None: - *

-   *  <p>{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
-   *  <p>For example:
-   *  <pre>

+   * Finds unique elements along an axis of a tensor. This operation returns a tensor {@code y}
+   * containing unique elements along the {@code axis} of a tensor. The returned unique elements
+   * are sorted in the same order as they occur along {@code axis} in {@code x}. This operation also
+   * returns a tensor {@code idx} that is the same size as the number of the elements in {@code x}
+   * along the {@code axis} dimension. It contains the index in the unique output {@code y}. In
+   * other words, for a {@code 1-D} tensor {@code x} with {@code axis = None}:
+   *
+   * <p>{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
+   *
+   * <p>For example:
+   *
+   * <pre>

    *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    *  y, idx = unique(x)
    *  y ==> [1, 2, 4, 7, 8]
    *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    *  
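In this Java API the same 1-D example reads as follows (a sketch, untested; the default index type is TInt32):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> x = tf.constant(new int[] {1, 1, 2, 4, 4, 4, 7, 8, 8});
  Unique<TInt32, TInt32> unique = tf.unique(x, tf.constant(new int[] {0}));
  // When run: unique.y() -> [1, 2, 4, 7, 8] and
  // unique.idx() -> [0, 0, 1, 2, 2, 2, 3, 4, 4], as above.
}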
-   *  <p>For an {@code 2-D} tensor {@code x} with {@code axis = 0}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
+   *
+   * <pre>

    *  # tensor 'x' is [[1, 0, 0],
    *  #                [1, 0, 0],
    *  #                [2, 0, 0]]
@@ -7590,8 +8104,10 @@ public  UnbatchGrad unbatchGrad(Operand originalInput,
    *         [2, 0, 0]]
    *  idx ==> [0, 0, 1]
    *  
-   *  <p>For an {@code 2-D} tensor {@code x} with {@code axis = 1}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 1}:
+   *
+   * <pre>

    *  # tensor 'x' is [[1, 0, 0],
    *  #                [1, 0, 0],
    *  #                [2, 0, 0]]
@@ -7606,7 +8122,7 @@ public  UnbatchGrad unbatchGrad(Operand originalInput,
    * @param  data type for {@code idx} output
    * @param x A {@code Tensor}.
    * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to
-   *  find the unique elements.
+   *     find the unique elements.
    * @param  data type for {@code UniqueV2} output and operands
    * @return a new instance of Unique, with default output types
    */
@@ -7615,24 +8131,27 @@ public  Unique unique(Operand x, Operand{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
-   *  

-   *  <p>For example:
-   *  <pre>

+   * Finds unique elements along an axis of a tensor. This operation returns a tensor {@code y}
+   * containing unique elements along the {@code axis} of a tensor. The returned unique elements
+   * are sorted in the same order as they occur along {@code axis} in {@code x}. This operation also
+   * returns a tensor {@code idx} that is the same size as the number of the elements in {@code x}
+   * along the {@code axis} dimension. It contains the index in the unique output {@code y}. In
+   * other words, for a {@code 1-D} tensor {@code x} with {@code axis = None}:
+   *
+   * <p>{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
+   *
+   * <p>For example:
+   *
+   * <pre>

    *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
    *  y, idx = unique(x)
    *  y ==> [1, 2, 4, 7, 8]
    *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    *  
-   *  <p>For an {@code 2-D} tensor {@code x} with {@code axis = 0}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
+   *
+   * <pre>

    *  # tensor 'x' is [[1, 0, 0],
    *  #                [1, 0, 0],
    *  #                [2, 0, 0]]
@@ -7641,8 +8160,10 @@ public  Unique unique(Operand x, Operand
-   *  <p>For an {@code 2-D} tensor {@code x} with {@code axis = 1}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 1}:
+   *
+   * <pre>

    *  # tensor 'x' is [[1, 0, 0],
    *  #                [1, 0, 0],
    *  #                [2, 0, 0]]
@@ -7657,38 +8178,41 @@ public  Unique unique(Operand x, Operand data type for {@code idx} output
    * @param x A {@code Tensor}.
    * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to
-   *  find the unique elements.
+   *     find the unique elements.
    * @param outIdx the value of the outIdx property
    * @param  data type for {@code UniqueV2} output and operands
    * @param  data type for {@code UniqueV2} output and operands
    * @return a new instance of Unique
    */
-  public  Unique unique(Operand x,
-      Operand axis, Class outIdx) {
+  public  Unique unique(
+      Operand x, Operand axis, Class outIdx) {
     return Unique.create(scope, x, axis, outIdx);
   }
 
   /**
-   * Finds unique elements along an axis of a tensor.
-   *  This operation either returns a tensor {@code y} containing unique elements
-   *  along the {@code axis} of a tensor. The returned unique elements is sorted
-   *  in the same order as they occur along {@code axis} in {@code x}.
-   *  This operation also returns a tensor {@code idx} and a tensor {@code count}
-   *  that are the same size as the number of the elements in {@code x} along the
-   *  {@code axis} dimension. The {@code idx} contains the index in the unique output {@code y}
-   *  and the {@code count} contains the count in the unique output {@code y}.
-   *  In other words, for an {@code 1-D} tensor {@code x} with `axis = None:
-   *  

-   *  <p>{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
-   *  <p>For example:
-   *  <pre>

+   * Finds unique elements along an axis of a tensor. This operation returns a tensor {@code y}
+   * containing unique elements along the {@code axis} of a tensor. The returned unique elements
+   * are sorted in the same order as they occur along {@code axis} in {@code x}. This operation also
+   * returns a tensor {@code idx} and a tensor {@code count} that are the same size as the number of
+   * the elements in {@code x} along the {@code axis} dimension. The {@code idx} contains the index
+   * in the unique output {@code y} and the {@code count} contains the count in the unique output
+   * {@code y}. In other words, for a {@code 1-D} tensor {@code x} with {@code axis = None}:
+   *
+   * <p>{@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
+   *
+   * <p>For example:
+   *
+   * <pre>

    *  x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
    *  y, idx, count = UniqueWithCountsV2(x, axis = [0])
    *  y ==> [1, 2, 4, 7, 8]
    *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    *  count ==> [2, 1, 3, 1, 2]
    *  
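The counted variant in Java (a sketch, untested):

try (Graph g = new Graph()) {
  Ops tf = Ops.create(g);
  Operand<TInt32> x = tf.constant(new int[] {1, 1, 2, 4, 4, 4, 7, 8, 8});
  UniqueWithCounts<TInt32, TInt32> u = tf.uniqueWithCounts(x, tf.constant(new int[] {0}));
  // When run: u.y() -> [1, 2, 4, 7, 8], u.idx() -> [0, 0, 1, 2, 2, 2, 3, 4, 4],
  // and u.count() -> [2, 1, 3, 1, 2], matching the example above.
}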
-   *  <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
+   *
+   * <pre>

    *  x = tf.constant([[1, 0, 0],
    *                  [1, 0, 0],
    *                  [2, 0, 0]])
@@ -7698,8 +8222,10 @@ public  Unique unique(Operand x,
    *  idx ==> [0, 0, 1]
    *  count ==> [2, 1]
    *  
- *

For a {@code 2-D} tensor {@code x} with {@code axis = 1}: - *

+   *
+   * 

For a {@code 2-D} tensor {@code x} with {@code axis = 1}: + * + *

    *  x = tf.constant([[1, 0, 0],
    *                  [1, 0, 0],
    *                  [2, 0, 0]])
@@ -7715,36 +8241,39 @@ public  Unique unique(Operand x,
    * @param  data type for {@code idx} output
    * @param x A {@code Tensor}.
    * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to
-   *  find the unique elements.
+   *     find the unique elements.
    * @param  data type for {@code UniqueWithCountsV2} output and operands
    * @return a new instance of UniqueWithCounts, with default output types
    */
-  public <T extends TType> UniqueWithCounts<T, TInt32> uniqueWithCounts(Operand<T> x,
-      Operand<? extends TNumber> axis) {
+  public <T extends TType> UniqueWithCounts<T, TInt32> uniqueWithCounts(
+      Operand<T> x, Operand<? extends TNumber> axis) {
     return UniqueWithCounts.create(scope, x, axis);
   }
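For orientation, a hedged sketch of the two-argument overload (same hypothetical {@code Ops tf} as above); without a {@code Class} argument the index and count dtype defaults to {@code TInt32}:

  UniqueWithCounts<TInt32, TInt32> u =
      tf.uniqueWithCounts(tf.constant(new int[] {1, 1, 2, 4, 4, 4, 7, 8, 8}), tf.constant(new int[] {0}));
  // u.y() ==> [1, 2, 4, 7, 8], u.idx() ==> [0, 0, 1, 2, 2, 2, 3, 4, 4], u.count() ==> [2, 1, 3, 1, 2]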
 
   /**
-   * Finds unique elements along an axis of a tensor.
-   *  This operation either returns a tensor {@code y} containing unique elements
-   *  along the {@code axis} of a tensor. The returned unique elements is sorted
-   *  in the same order as they occur along {@code axis} in {@code x}.
-   *  This operation also returns a tensor {@code idx} and a tensor {@code count}
-   *  that are the same size as the number of the elements in {@code x} along the
-   *  {@code axis} dimension. The {@code idx} contains the index in the unique output {@code y}
-   *  and the {@code count} contains the count in the unique output {@code y}.
-   *  In other words, for an {@code 1-D} tensor {@code x} with `axis = None:
-   *  <pre>
-   *  {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
-   *  </pre>
-   *  <p>For example:
-   *  <pre>
+   * Finds unique elements along an axis of a tensor. This operation either returns a tensor {@code
+   * y} containing unique elements along the {@code axis} of a tensor. The returned unique elements
+   * is sorted in the same order as they occur along {@code axis} in {@code x}. This operation also
+   * returns a tensor {@code idx} and a tensor {@code count} that are the same size as the number of
+   * the elements in {@code x} along the {@code axis} dimension. The {@code idx} contains the index
+   * in the unique output {@code y} and the {@code count} contains the count in the unique output
+   * {@code y}. In other words, for an {@code 1-D} tensor {@code x} with `axis = None:
+   *
+   * <pre>
+   *  {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
+   *  </pre>
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
    *  y, idx, count = UniqueWithCountsV2(x, axis = [0])
    *  y ==> [1, 2, 4, 7, 8]
    *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
    *  count ==> [2, 1, 3, 1, 2]
    *  </pre>
-   *  <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
+   *
+   * <pre>
    *  x = tf.constant([[1, 0, 0],
    *                  [1, 0, 0],
    *                  [2, 0, 0]])
@@ -7754,8 +8283,10 @@ public <T extends TType> UniqueWithCounts<T, TInt32> uniqueWithCounts(Operand
    *  idx ==> [0, 0, 1]
    *  count ==> [2, 1]
    *  </pre>
-   *  <p>For a {@code 2-D} tensor {@code x} with {@code axis = 1}:
-   *  <pre>
+   *
+   * <p>For a {@code 2-D} tensor {@code x} with {@code axis = 1}:
+   *
+   * <pre>
    *  x = tf.constant([[1, 0, 0],
    *                  [1, 0, 0],
    *                  [2, 0, 0]])
@@ -7771,21 +8302,21 @@ public <T extends TType> UniqueWithCounts<T, TInt32> uniqueWithCounts(Operand
    * @param <V> data type for {@code idx} output
    * @param x A {@code Tensor}.
    * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to
-   *  find the unique elements.
+   *     find the unique elements.
    * @param outIdx the value of the outIdx property
    * @param <T> data type for {@code UniqueWithCountsV2} output and operands
    * @param <V> data type for {@code UniqueWithCountsV2} output and operands
    * @return a new instance of UniqueWithCounts
    */
-  public <T extends TType, V extends TNumber> UniqueWithCounts<T, V> uniqueWithCounts(Operand<T> x,
-      Operand<? extends TNumber> axis, Class<V> outIdx) {
+  public <T extends TType, V extends TNumber> UniqueWithCounts<T, V> uniqueWithCounts(
+      Operand<T> x, Operand<? extends TNumber> axis, Class<V> outIdx) {
     return UniqueWithCounts.create(scope, x, axis, outIdx);
   }
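And the {@code Class}-taking overload, sketched for the 2-D, axis = 1 case documented above (same assumed {@code tf}; {@code TInt64.class} widens {@code idx}/{@code count} for very large tensors):

  Operand<TInt32> x = tf.constant(new int[][] {{1, 0, 0}, {1, 0, 0}, {2, 0, 0}});
  UniqueWithCounts<TInt32, TInt64> u =
      tf.uniqueWithCounts(x, tf.constant(new int[] {1}), TInt64.class);
  // u.y() ==> [[1, 0], [1, 0], [2, 0]], u.idx() ==> [0, 1, 1], u.count() ==> [1, 2]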
 
   /**
-   * Converts an array of flat indices into a tuple of coordinate arrays.
-   *  Example:
-   *  <pre>
+   * Converts an array of flat indices into a tuple of coordinate arrays. Example:
+   *
+   * <pre>
    *  y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
    *  # 'dims' represent a hypothetical (3, 3) tensor of indices:
    *  # [[0, 1, *2*],
@@ -7798,15 +8329,15 @@ public <T extends TType, V extends TNumber> UniqueWithCounts<T, V> uniqueWithCou
    *  # 7 ==> (2, 1)
    *  y ==> [[0, 1, 2], [2, 2, 1]]
    *  </pre>
-   *  <p>{@literal @}compatibility(numpy)<br>
-   *  Equivalent to np.unravel_index
-   *  <br>{@literal @}end_compatibility
+   *
+   * <p>{@literal @}compatibility(numpy)<br>
+   * Equivalent to np.unravel_index<br>
+   * {@literal @}end_compatibility
    *
    * @param <T> data type for {@code output} output
-   * @param indices An 0-D or 1-D {@code int} Tensor whose elements are indices into the
-   *  flattened version of an array of dimensions dims.
-   * @param dims An 1-D {@code int} Tensor. The shape of the array to use for unraveling
-   *  indices.
+   * @param indices An 0-D or 1-D {@code int} Tensor whose elements are indices into the flattened
+   *     version of an array of dimensions dims.
+   * @param dims An 1-D {@code int} Tensor. The shape of the array to use for unraveling indices.
    * @param <T> data type for {@code UnravelIndex} output and operands
    * @return a new instance of UnravelIndex
    */
@@ -7815,16 +8346,18 @@ public <T extends TNumber> UnravelIndex<T> unravelIndex(Operand<T> indices, Oper
   }

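A short sketch of {@code unravelIndex}, mirroring the {@code tf.unravel_index} example in the Javadoc above (hypothetical {@code tf} as before; both arguments are {@code TInt32} here):

  UnravelIndex<TInt32> coords =
      tf.unravelIndex(tf.constant(new int[] {2, 5, 7}), tf.constant(new int[] {3, 3}));
  // coords ==> [[0, 1, 2], [2, 2, 1]]  (row indices, then column indices)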
   /**
-   * Unpacks a given dimension of a rank-{@code R} tensor into {@code num} rank-{@code (R-1)} tensors.
-   *  Unpacks {@code num} tensors from {@code value} by chipping it along the {@code axis} dimension.
-   *  For example, given a tensor of shape {@code (A, B, C, D)};
-   *  <p>If {@code axis == 0} then the i'th tensor in {@code output} is the slice {@code value[i, :, :, :]}
-   *  and each tensor in {@code output} will have shape {@code (B, C, D)}. (Note that the
-   *  dimension unpacked along is gone, unlike {@code split}).
-   *  <p>If {@code axis == 1} then the i'th tensor in {@code output} is the slice {@code value[:, i, :, :]}
-   *  and each tensor in {@code output} will have shape {@code (A, C, D)}.
-   *  Etc.
-   *  <p>This is the opposite of {@code pack}.
+   * Unpacks a given dimension of a rank-{@code R} tensor into {@code num} rank-{@code (R-1)}
+   * tensors. Unpacks {@code num} tensors from {@code value} by chipping it along the {@code axis}
+   * dimension. For example, given a tensor of shape {@code (A, B, C, D)};
+   *
+   * <p>If {@code axis == 0} then the i'th tensor in {@code output} is the slice {@code value[i, :,
+   * :, :]} and each tensor in {@code output} will have shape {@code (B, C, D)}. (Note that the
+   * dimension unpacked along is gone, unlike {@code split}).
+   *
+   * <p>If {@code axis == 1} then the i'th tensor in {@code output} is the slice {@code value[:, i,
+   * :, :]} and each tensor in {@code output} will have shape {@code (A, C, D)}. Etc.
+   *
+   * <p>This is the opposite of {@code pack}.
    *
    * @param <T> data type for {@code output} output
    * @param value 1-D or higher, with {@code axis} dimension size equal to {@code num}.
@@ -7833,15 +8366,14 @@ public <T extends TNumber> UnravelIndex<T> unravelIndex(Operand<T> indices, Oper
    * @param num the value of the num property
    * @param options carries optional attribute values
    * @param <T> data type for {@code Unpack} output and operands
    * @return a new instance of Unstack
    */
-  public <T extends TType> Unstack<T> unstack(Operand<T> value, Long num,
-      Unstack.Options... options) {
+  public <T extends TType> Unstack<T> unstack(
+      Operand<T> value, Long num, Unstack.Options... options) {
     return Unstack.create(scope, value, num, options);
   }
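Sketch for {@code unstack} (assumed {@code tf}; {@code Unstack.axis} is one of the generated {@code Options} factories, shown here on the default axis 0):

  Operand<TInt32> value = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
  Unstack<TInt32> parts = tf.unstack(value, 2L, Unstack.axis(0L));
  // parts.output() holds two rank-1 tensors: [1, 2, 3] and [4, 5, 6]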

   /**
-   * Op is similar to a lightweight Dequeue.
-   *  The basic functionality is similar to dequeue with many fewer
-   *  capabilities and options. This Op is optimized for performance.
+   * Op is similar to a lightweight Dequeue. The basic functionality is similar to dequeue with many
+   * fewer capabilities and options. This Op is optimized for performance.
    *
    * @param dtypes the value of the dtypes property
    * @param options carries optional attribute values
@@ -7854,15 +8386,15 @@ public Unstage unstage(List<Class<? extends TType>> dtypes, Unstage.Options... o
   /**
    * Creates a handle to a Variable resource.
    *
-   * @param dtype the type of this variable. Must agree with the dtypes
-   *  of all ops using this variable.
+   * @param dtype the type of this variable. Must agree with the dtypes of all ops using this
+   *     variable.
    * @param shape The (possibly partially specified) shape of this variable.
    * @param options carries optional attribute values
    * @param <T> data type for {@code VarHandleOp} output and operands
    * @return a new instance of VarHandleOp
    */
-  public <T extends TType> VarHandleOp varHandleOp(Class<T> dtype, Shape shape,
-      VarHandleOp.Options... options) {
+  public <T extends TType> VarHandleOp varHandleOp(
+      Class<T> dtype, Shape shape, VarHandleOp.Options... options) {
     return VarHandleOp.create(scope, dtype, shape, options);
   }
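Hedged sketch for {@code varHandleOp} (assumed {@code tf} and {@code org.tensorflow.ndarray.Shape}; {@code sharedName} is one of the optional attributes carried by {@code VarHandleOp.Options}):

  VarHandleOp handle =
      tf.varHandleOp(TFloat32.class, Shape.of(2, 2), VarHandleOp.sharedName("w"));
  // The handle is then paired with resource-variable ops such as assign/read elsewhere.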
@@ -7878,10 +8410,10 @@ public VarIsInitializedOp varIsInitializedOp(Operand<? extends TType> resource)

   /**
    * Factory method to create a new Variable with its initializer. Both the creation and assignment
-   * are done in the init scope.
+   * are done in the init scope.
    *
-   * <p>Only supported on Graph sessions as the {@link org.tensorflow.op.core.Assign} op does not
-   * work in an EagerSession.
+   * <p>Only supported on Graph sessions as the {@link org.tensorflow.op.core.Assign} op does not
+   * work in an EagerSession.
    *
    * @param init The op to use to initialise this variable.
    * @param options carries optional attributes values
@@ -7892,10 +8424,9 @@ public <T extends TType> Variable<T> variable(Operand<T> init, Variable.Options.
   }

   /**
-   * Holds state in the form of a tensor that persists across steps.
-   *  Outputs a ref to the tensor state so it may be read or modified.
-   *  TODO(zhifengc/mrry): Adds a pointer to a more detail document
-   *  about sharing states in tensorflow.
+   * Holds state in the form of a tensor that persists across steps. Outputs a ref to the tensor
+   * state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detail
+   * document about sharing states in tensorflow.
    *
    * @param <T> data type for {@code ref} output
    * @param shape The shape of the variable tensor.
@@ -7904,16 +8435,18 @@ public <T extends TType> Variable<T> variable(Operand<T> init, Variable.Options.
    * @param dtype the value of the dtype property
    * @param options carries optional attribute values
    * @param <T> data type for {@code VariableV2} output and operands
    * @return a new instance of Variable
    */
-  public <T extends TType> Variable<T> variable(Shape shape, Class<T> dtype,
-      Variable.Options... options) {
+  public <T extends TType> Variable<T> variable(
+      Shape shape, Class<T> dtype, Variable.Options... options) {
     return Variable.create(scope, shape, dtype, options);
   }
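Sketch of the initializer-based {@code variable} factory above (Graph sessions only, per its Javadoc; {@code tf} assumed as before):

  Variable<TFloat32> w =
      tf.variable(tf.constant(new float[][] {{1f, 2f}, {3f, 4f}}));
  // Creation and the backing Assign op are registered in the init scope.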

   /**
-   * Returns the shape of the variable pointed to by {@code resource}.
-   *  This operation returns a 1-D integer tensor representing the shape of {@code input}.
-   *  <p>For example:
-   *  <pre>
+   * Returns the shape of the variable pointed to by {@code resource}. This operation returns a 1-D
+   * integer tensor representing the shape of {@code input}.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    *  shape(t) ==> [2, 2, 3]
    *  </pre>
@@ -7927,10 +8460,12 @@ public VariableShape variableShape(Operand<? extends TType> input) {
   }

   /**
-   * Returns the shape of the variable pointed to by {@code resource}.
-   *  This operation returns a 1-D integer tensor representing the shape of {@code input}.
-   *  <p>For example:
-   *  <pre>
+   * Returns the shape of the variable pointed to by {@code resource}. This operation returns a 1-D
+   * integer tensor representing the shape of {@code input}.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    *  shape(t) ==> [2, 2, 3]
    *  </pre>
@@ -7941,21 +8476,22 @@ public VariableShape variableShape(Operand<? extends TType> input) {
    * @param <T> data type for {@code VariableShape} output and operands
    * @return a new instance of VariableShape
    */
-  public <T extends TNumber> VariableShape<T> variableShape(Operand<? extends TType> input,
-      Class<T> outType) {
+  public <T extends TNumber> VariableShape<T> variableShape(
+      Operand<? extends TType> input, Class<T> outType) {
     return VariableShape.create(scope, input, outType);
   }

   /**
-   * Returns locations of nonzero / true values in a tensor.
-   *  This operation returns the coordinates of true elements in {@code condition}. The
-   *  coordinates are returned in a 2-D tensor where the first dimension (rows)
-   *  represents the number of true elements, and the second dimension (columns)
-   *  represents the coordinates of the true elements. Keep in mind, the shape of
-   *  the output tensor can vary depending on how many true values there are in
-   *  {@code condition}. Indices are output in row-major order.
-   *  <p>For example:
-   *  <pre>
+   * Returns locations of nonzero / true values in a tensor. This operation returns the coordinates
+   * of true elements in {@code condition}. The coordinates are returned in a 2-D tensor where the
+   * first dimension (rows) represents the number of true elements, and the second dimension
+   * (columns) represents the coordinates of the true elements. Keep in mind, the shape of the
+   * output tensor can vary depending on how many true values there are in {@code condition}.
+   * Indices are output in row-major order.
+   *
+   * <p>For example:
+   *
+   * <pre>
    *  # 'input' tensor is [[True, False]
    *  #                    [True, False]]
    *  # 'input' has two true values, so output has two coordinates.
@@ -8016,10 +8552,12 @@ public Where where(Operand<? extends TType> condition) {
   /**
    * output = input; While (Cond(output)) { output = Body(output) }
    *
-   *  <p>Selects between {@link StatefulWhile} and {@link StatelessWhile} based on the statefulness of the function arguments.
+   * <p>Selects between {@link StatefulWhile} and {@link StatelessWhile} based on the statefulness
+   * of the function arguments.
    *
    * @param input A list of input tensors whose types are T.
-   * @param cond <pre>
+   * @param cond
+   *     <pre>
    *    A function takes 'input' and returns a tensor.  If the tensor is
    *    a scalar of non-boolean, the scalar is converted to a boolean
    *    according to the following rule: if the scalar is a numerical
@@ -8028,15 +8566,21 @@ public Where where(Operand<? extends TType> condition) {
    *    tensor is not a scalar, non-emptiness means True and False
    *    otherwise.
    *  </pre>
-   * @param body <pre>
+   *
+   * @param body
+   *     <pre>
    *    list of tensors. Both lists have the same types as specified
    *    by T.
    *  </pre>
+   *
    * @param options carries optional attribute values
    * @return a new instance of While
    */
-  public While whileOp(Iterable<Operand<?>> input, ConcreteFunction cond, ConcreteFunction body,
+  public While whileOp(
+      Iterable<Operand<?>> input,
+      ConcreteFunction cond,
+      ConcreteFunction body,
       While.Options... options) {
     return While.create(scope, input, cond, body, options);
   }
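Sketch of {@code whileOp} (heavily hedged: {@code counterCond} and {@code counterBody} are hypothetical {@code ConcreteFunction}s over a single int32 value, e.g. built with {@code ConcreteFunction.create}, and {@code tf} is assumed as before):

  List<Operand<?>> inputs = Arrays.<Operand<?>>asList(tf.constant(0));
  While loop = tf.whileOp(inputs, counterCond, counterBody);
  // loop.output() holds the final loop-carried values once cond returns false.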
@@ -8075,13 +8619,16 @@ public Ops withSubScope(String childScopeName) {
   }

   /**
-   * Returns an API that builds init operations. {@link #liftToInitScope(Operand)} will be called for all created operations.
-   * <p>
-   * Init operations will be initialized at session creation, will have their inputs (and control inputs) made init ops as well,
-   * and are ignored when used as control dependencies.
-   * Additionally, this scope ignores any control dependencies.
-   * <p>
-   * If an input can not be made an init op (i.e. a Placeholder), will throw an {@link IllegalStateException} on op creation.
+   * Returns an API that builds init operations. {@link #liftToInitScope(Operand)} will be called
+   * for all created operations.
+   *
+   * <p>Init operations will be initialized at session creation, will have their inputs (and control
+   * inputs) made init ops as well, and are ignored when used as control dependencies. Additionally,
+   * this scope ignores any control dependencies.
+   *
+   * <p>If an input can not be made an init op (i.e. a Placeholder), will throw an {@link
+   * IllegalStateException} on op creation.
+   *
    * @see #liftToInitScope(Operand)
    */
   public Ops withInitScope() {
@@ -8090,14 +8637,15 @@ public Ops withInitScope() {

   /**
    * Make {@code op} an init operation, doing the same for all of it's inputs (and control inputs).
-   * <p>
-   * Init operations will be initialized at session creation, will have their inputs (and control inputs) made init ops as well,
-   * and are ignored when used as control dependencies.
-   * Additionally, this scope ignores any control dependencies.
-   * <p>
-   * If an input can not be made an init op (i.e. a Placeholder), will throw an {@link IllegalStateException} on op creation.
-   * @see ExecutionEnvironment#registerInitOp(Operation)
    *
+   * <p>Init operations will be initialized at session creation, will have their inputs (and control
+   * inputs) made init ops as well, and are ignored when used as control dependencies. Additionally,
+   * this scope ignores any control dependencies.
+   *
+   * <p>If an input can not be made an init op (i.e. a Placeholder), will throw an {@link
+   * IllegalStateException} on op creation.
+   *
+   * @see ExecutionEnvironment#registerInitOp(Operation)
    * @throws IllegalStateException if the op or one of its inputs can't be made an init op.
    */
   public <T extends Op> T liftToInitScope(T op) {
@@ -8132,16 +8680,12 @@ public Ops withControlDependencies(Iterable<Op> controls) {
     return new Ops(scope.withControlDependencies(controls));
   }

-  /**
-   * Returns the current {@link Scope scope} of this API
-   */
+  /** Returns the current {@link Scope scope} of this API */
   public final Scope scope() {
     return scope;
   }

-  /**
-   * Creates an API for building operations in the provided execution environment
-   */
+  /** Creates an API for building operations in the provided execution environment */
   public static Ops create(ExecutionEnvironment env) {
     return new Ops(env.baseScope());
   }
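Finally, a hedged sketch of the init-scope helpers documented above (Graph-backed {@code Ops tf} assumed):

  Ops init = tf.withInitScope();              // every op built via `init` becomes an init op
  Variable<TFloat32> table = init.variable(init.constant(new float[] {1f, 2f}));
  Constant<TFloat32> lifted = tf.liftToInitScope(tf.constant(3f)); // retrofit an existing op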