diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 6cf6d4fe232..3bf50107a61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -252,12 +252,20 @@ import org.tensorflow.op.core.Zeros; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.tools.Shape; +import org.tensorflow.tools.ndarray.BooleanNdArray; +import org.tensorflow.tools.ndarray.ByteNdArray; +import org.tensorflow.tools.ndarray.DoubleNdArray; +import org.tensorflow.tools.ndarray.FloatNdArray; +import org.tensorflow.tools.ndarray.IntNdArray; +import org.tensorflow.tools.ndarray.LongNdArray; +import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -269,27 +277,27 @@ *

Example usage: *

{@code
  * try (Graph g = new Graph()) {
- *   Ops ops = Ops.create(g);
+ *   Ops tf = Ops.create(g);
  *   // Operations are typed classes with convenience
  *   // builders in Ops.
- *   Constant three = ops.constant(3);
+ *   Constant three = tf.val(3);
  *   // Single-result operations implement the Operand
  *   // interface, so this works too.
- *   Operand four = ops.constant(4);
+ *   Operand four = tf.val(4);
  *   // Most builders are found within a group, and accept
  *   // Operand types as operands
- *   Operand nine = ops.math.add(four, ops.constant(5));
+ *   Operand nine = tf.math.add(four, tf.val(5));
  *   // Multi-result operations however offer methods to
  *   // select a particular result for use.
- *   Operand result = 
- *       ops.math.add(ops.unique(s, a).y(), b);
+ *   Operand result = 
+ *       tf.math.add(tf.unique(s, a).y(), b);
  *   // Optional attributes
- *   ops.linalg.matMul(a, b, MatMul.transposeA(true));
+ *   tf.linalg.matMul(a, b, MatMul.transposeA(true));
  *   // Naming operators
- *   ops.withName("foo").constant(5); // name "foo"
+ *   tf.withName("foo").val(5); // name "foo"
  *   // Names can exist in a hierarchy
- *   Ops sub = ops.withSubScope("sub");
- *   sub.withName("bar").constant(4); // "sub/bar"
+ *   Ops sub = tf.withSubScope("sub");
+ *   sub.withName("bar").val(4); // "sub/bar"
  * }
  * }
*/ @@ -401,6 +409,96 @@ public Any any(Operand input, Operand axis, return Any.create(scope, input, axis, options); } + /** + * Creates a constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + public Constant array(int... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return the {@code String} constant + */ + public Constant array(String... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + */ + public Constant array(boolean... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a long constant + */ + public Constant array(long... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + public Constant array(float... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
+ * @return a double constant + */ + public Constant array(double... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a byte constant + */ + public Constant array(byte... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant + */ + public Constant array(Charset charset, String... data) { + return Constant.arrayOf(scope, charset, data); + } + /** * Asserts that the given condition is true. *

@@ -981,6059 +1079,6233 @@ public Concat concat(Iterable } /** - * Creates a constant containing a single {@code int} element. + * Create a constant from a Java object. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant - */ - public Constant constant(int data) { - return Constant.create(scope, data); - } - - /** - * Creates a rank-3 constant of {@code int} elements. + *

The argument {@code object} is first converted into a Tensor using {@link + * org.tensorflow.Tensor#create(Object)}, so only Objects supported by this method must be + * provided. For example: * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - */ - public Constant constant(int[][][] data) { - return Constant.create(scope, data); - } - - /** - * Creates a rank-4 constant of {@code String} elements, each represented as an array of {@code byte}s. + *

{@code
+   *  Constant.create(scope, new int[]{{1, 2}, {3, 4}}, TInt32.DTYPE); // returns a 2x2 integer matrix
+   *  }
* * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param object a Java object representing the constant. + * @return a constant of type `type` + * @see org.tensorflow.Tensor#create(Object) Tensor.create + * @deprecated use {@link Ops#val(Tensor)} instead */ - public Constant constant(byte[][][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(Object object, DataType type) { + return Constant.create(scope, object, type); } /** - * Creates a rank-5 constant of {@code long} elements. + * Create a {@link TInt32} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return an integer constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt32>)} instead */ - public Constant constant(long[][][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, IntBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a constant containing a single {@code boolean} element. + * Create a {@link TInt64} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a long constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt64>)} instead */ - public Constant constant(boolean data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, LongBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-4 constant of {@code int} elements. + * Create a {@link TFloat64} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a double constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat64>)} instead */ - public Constant constant(int[][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, DoubleBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-3 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Create a {@link TFloat32} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a float constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat32>)} instead */ - public Constant constant(byte[][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, FloatBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Create a constant with data from the given buffer. + * + *

Creates a Constant with the provided shape of any type where the constant data has been + * encoded into {@code data} as per the specification of the TensorFlow C + * API. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param type the tensor datatype. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @deprecated use {@link Ops#val(Tensor)} instead */ - public Constant constant(long[][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(DataType type, long[] shape, ByteBuffer data) { + return Constant.create(scope, type, shape, data); } /** - * Creates a rank-1 constant of {@code String} elements, each represented as an array of {@code byte}s. + * This op consumes a lock created by `MutexLock`. + *

+ * This op exists to consume a tensor created by `MutexLock` (other than + * direct control dependencies). It should be the only that consumes the tensor, + * and will raise an error if it is not. Its only purpose is to keep the + * mutex lock tensor alive until it is consumed by this op. + *

+ * NOTE: This operation must run on the same device as its input. This may + * be enforced via the `colocate_with` mechanism. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param mutexLock A tensor returned by `MutexLock`. + * @return a new instance of ConsumeMutexLock */ - public Constant constant(byte[][] data) { - return Constant.create(scope, data); + public ConsumeMutexLock consumeMutexLock(Operand mutexLock) { + return ConsumeMutexLock.create(scope, mutexLock); } /** - * Creates a rank-3 constant of {@code double} elements. + * Does nothing. Serves as a control trigger for scheduling. + *

+ * Only useful as a placeholder for control edges. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @return a new instance of ControlTrigger */ - public Constant constant(double[][][] data) { - return Constant.create(scope, data); + public ControlTrigger controlTrigger() { + return ControlTrigger.create(scope); } /** - * Creates a rank-6 constant of {@code long} elements. + * Increments 'ref' until it reaches 'limit'. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param ref Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @return a new instance of CountUpTo */ - public Constant constant(long[][][][][][] data) { - return Constant.create(scope, data); + public CountUpTo countUpTo(Operand ref, Long limit) { + return CountUpTo.create(scope, ref, limit); } /** - * Creates a rank-5 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Makes a copy of `x`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data type for {@code y()} output + * @param x The source tensor of type `T`. + * @return a new instance of DeepCopy */ - public Constant constant(byte[][][][][][] data) { - return Constant.create(scope, data); + public DeepCopy deepCopy(Operand x) { + return DeepCopy.create(scope, x); } /** - * Creates a rank-4 constant of {@code float} elements. 
+ * Delete the tensor specified by its handle in the session. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param handle The handle for a tensor stored in the session state. + * @return a new instance of DeleteSessionTensor */ - public Constant constant(float[][][][] data) { - return Constant.create(scope, data); + public DeleteSessionTensor deleteSessionTensor(Operand handle) { + return DeleteSessionTensor.create(scope, handle); } /** - * Creates a rank-1 constant of {@code boolean} elements. + * Deletes the resource specified by the handle. + *

+ * All subsequent operations using the resource will result in a NotFound + * error status. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param resource handle to the resource to delete. + * @param options carries optional attributes values + * @return a new instance of DestroyResourceOp */ - public Constant constant(boolean[] data) { - return Constant.create(scope, data); + public DestroyResourceOp destroyResourceOp(Operand resource, + DestroyResourceOp.Options... options) { + return DestroyResourceOp.create(scope, resource, options); } /** - * Creates a rank-1 constant of {@code double} elements. + * Destroys the temporary variable and returns its final value. + *

+ * Sets output to the value of the Tensor pointed to by 'ref', then destroys + * the temporary variable called 'var_name'. + * All other uses of 'ref' must have executed before this op. + * This is typically achieved by chaining the ref through each assign op, or by + * using control dependencies. + *

+ * Outputs the final value of the tensor pointed to by 'ref'. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code value()} output + * @param ref A reference to the temporary variable tensor. + * @param varName Name of the temporary variable, usually the name of the matching + * 'TemporaryVariable' op. + * @return a new instance of DestroyTemporaryVariable */ - public Constant constant(double[] data) { - return Constant.create(scope, data); + public DestroyTemporaryVariable destroyTemporaryVariable(Operand ref, + String varName) { + return DestroyTemporaryVariable.create(scope, ref, varName); } /** - * Creates a rank-6 constant of {@code boolean} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * Partitions `data` into `num_partitions` tensors using indices from `partitions`. + *

+ * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` + * are placed in `outputs[i]` in lexicographic order of `js`, and the first + * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. + * In detail, + *

{@code
+   *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+   *
+   *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+   *  }
+ * `data.shape` must start with `partitions.shape`. + *

+ * For example: + *

{@code
+   *      # Scalar partitions.
+   *      partitions = 1
+   *      num_partitions = 2
+   *      data = [10, 20]
+   *      outputs[0] = []  # Empty with shape [0, 2]
+   *      outputs[1] = [[10, 20]]
+   *
+   *      # Vector partitions.
+   *      partitions = [0, 0, 1, 1, 0]
+   *      num_partitions = 2
+   *      data = [10, 20, 30, 40, 50]
+   *      outputs[0] = [10, 20, 50]
+   *      outputs[1] = [30, 40]
+   *  }
+ * See `dynamic_stitch` for an example on how to merge partitions back. + *

+ *

+ * + *
+ * + * @param data type for {@code outputs()} output + * @param data + * @param partitions Any shape. Indices in the range `[0, num_partitions)`. + * @param numPartitions The number of partitions to output. + * @return a new instance of DynamicPartition */ - public Constant constant(boolean[][][][][][] data) { - return Constant.create(scope, data); + public DynamicPartition dynamicPartition(Operand data, + Operand partitions, Long numPartitions) { + return DynamicPartition.create(scope, data, partitions, numPartitions); } /** - * Creates a rank-4 constant of {@code boolean} elements. + * Interleave the values from the `data` tensors into a single tensor. + *

+ * Builds a merged tensor such that + *

{@code
+   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+   *  }
+ * For example, if each `indices[m]` is scalar or vector, we have + *
{@code
+   *      # Scalar indices:
+   *      merged[indices[m], ...] = data[m][...]
    *
-   * @param scope is a scope used to add the underlying operation.
-   * @param data An array containing the values to put into the new constant. The dimensions of the
-   *      new constant will match those of the array.
+   *      # Vector indices:
+   *      merged[indices[m][i], ...] = data[m][i, ...]
+   *  }
+ * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + *

+ * merged.shape = [max(indices)] + constant + *

+ * Values are merged in order, so if an index appears in both `indices[m][i]` and + * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the + * merged result. If you do not need this guarantee, ParallelDynamicStitch might + * perform better on some devices. + *

+ * For example: + *

{@code
+   *      indices[0] = 6
+   *      indices[1] = [4, 1]
+   *      indices[2] = [[5, 2], [0, 3]]
+   *      data[0] = [61, 62]
+   *      data[1] = [[41, 42], [11, 12]]
+   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+   *                [51, 52], [61, 62]]
+   *  }
+ * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + *
{@code
+   *      # Apply function (increments x_i) on elements for which a certain condition
+   *      # apply (x_i != -1 in this example).
+   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
+   *      partitioned_data = tf.dynamic_partition(
+   *          x, tf.cast(condition_mask, tf.int32) , 2)
+   *      partitioned_data[1] = partitioned_data[1] + 1.0
+   *      condition_indices = tf.dynamic_partition(
+   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
+   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
+   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+   *      # unchanged.
+   *  }
+ *
+ * + *
+ * + * @param data type for {@code merged()} output + * @param indices + * @param data + * @return a new instance of DynamicStitch */ - public Constant constant(boolean[][][][] data) { - return Constant.create(scope, data); + public DynamicStitch dynamicStitch(Iterable> indices, + Iterable> data) { + return DynamicStitch.create(scope, indices, data); } /** - * Creates a rank-6 constant of {@code float} elements. + * Computes the (possibly normalized) Levenshtein Edit Distance. + *

+ * The inputs are variable-length sequences provided by SparseTensors + * (hypothesis_indices, hypothesis_values, hypothesis_shape) + * and + * (truth_indices, truth_values, truth_shape). + *

+ * The inputs are: * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param hypothesisIndices The indices of the hypothesis list SparseTensor. + * This is an N x R int64 matrix. + * @param hypothesisValues The values of the hypothesis list SparseTensor. + * This is an N-length vector. + * @param hypothesisShape The shape of the hypothesis list SparseTensor. + * This is an R-length vector. + * @param truthIndices The indices of the truth list SparseTensor. + * This is an M x R int64 matrix. + * @param truthValues The values of the truth list SparseTensor. + * This is an M-length vector. + * @param truthShape truth indices, vector. + * @param options carries optional attributes values + * @return a new instance of EditDistance */ - public Constant constant(float[][][][][][] data) { - return Constant.create(scope, data); + public EditDistance editDistance(Operand hypothesisIndices, + Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, + Operand truthValues, Operand truthShape, EditDistance.Options... options) { + return EditDistance.create(scope, hypothesisIndices, hypothesisValues, hypothesisShape, truthIndices, truthValues, truthShape, options); } /** - * Creates a rank-2 constant of {@code long} elements. + * Creates a tensor with the given shape. + *

+ * This operation creates a tensor of `shape` and `dtype`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param shape 1-D. Represents the shape of the output tensor. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of Empty */ - public Constant constant(long[][] data) { - return Constant.create(scope, data); + public Empty empty(Operand shape, DataType dtype, + Empty.Options... options) { + return Empty.create(scope, shape, dtype, options); } /** - * Creates a rank-2 constant of {@code double} elements. + * Creates and returns an empty tensor list. + *

+ * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. + *

+ * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param elementShape + * @param maxNumElements + * @param elementDtype + * @return a new instance of EmptyTensorList */ - public Constant constant(double[][] data) { - return Constant.create(scope, data); + public EmptyTensorList emptyTensorList( + Operand elementShape, Operand maxNumElements, DataType elementDtype) { + return EmptyTensorList.create(scope, elementShape, maxNumElements, elementDtype); } /** - * Creates a rank-6 constant of {@code double} elements. + * Ensures that the tensor's shape matches the expected shape. + *

+ * Raises an error if the input tensor's shape does not match the specified shape. + * Returns the input tensor otherwise. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input A tensor, whose shape is to be validated. + * @param shape The expected (possibly partially specified) shape of the input tensor. + * @return a new instance of EnsureShape */ - public Constant constant(double[][][][][][] data) { - return Constant.create(scope, data); + public EnsureShape ensureShape(Operand input, Shape shape) { + return EnsureShape.create(scope, input, shape); } /** - * Creates a constant containing a single {@code String} element, represented as an array of {@code byte}s. + * Inserts a dimension of 1 into a tensor's shape. + *

+ * Given a tensor `input`, this operation inserts a dimension of 1 at the + * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + * zero; if you specify a negative number for `axis` it is counted backward from + * the end. + *

+ * This operation is useful if you want to add a batch dimension to a single + * element. For example, if you have a single image of shape `[height, width, + * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + * which will make the shape `[1, height, width, channels]`. + *

+ * Other examples: + *

{@code
+   *  # 't' is a tensor of shape [2]
+   *  shape(expand_dims(t, 0)) ==> [1, 2]
+   *  shape(expand_dims(t, 1)) ==> [2, 1]
+   *  shape(expand_dims(t, -1)) ==> [2, 1]
    *
-   * @param scope is a scope used to add the underlying operation.
-   * @param data An array containing the values to put into the new constant. String elements are
-   *      sequences of bytes from the last array dimension.
+   *  # 't2' is a tensor of shape [2, 3, 5]
+   *  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+   *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+   *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+   *  }
+ * This operation requires that: + *

+ * `-1-input.dims() <= dim <= input.dims()` + *

+ * This operation is related to `squeeze()`, which removes dimensions of + * size 1. + * + * @param data type for {@code output()} output + * @param input + * @param axis 0-D (scalar). Specifies the dimension index at which to + * expand the shape of `input`. Must be in the range + * `[-rank(input) - 1, rank(input)]`. + * @return a new instance of ExpandDims */ - public Constant constant(byte[] data) { - return Constant.create(scope, data); + public ExpandDims expandDims(Operand input, + Operand axis) { + return ExpandDims.create(scope, input, axis); } /** - * Creates a rank-6 constant of {@code int} elements. + * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code patches()} output + * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `input`. + * @param strides 1-D of length 5. How far the centers of two consecutive patches are in + * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * @param padding The type of padding algorithm to use. + *

+ * We specify the size-related attributes as: + *

{@code
+   *        ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
+   *        strides = [1, stride_planes, strides_rows, strides_cols, 1]
+   *  }
+ * @return a new instance of ExtractVolumePatches */ - public Constant constant(int[][][][][][] data) { - return Constant.create(scope, data); + public ExtractVolumePatches extractVolumePatches(Operand input, + List ksizes, List strides, String padding) { + return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); } /** - * Creates a rank-5 constant of {@code boolean} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * Creates a tensor filled with a scalar value. + *

+ * This operation creates a tensor of shape `dims` and fills it with `value`. + *

+ * For example: + *

{@code
+   *  # Output tensor has shape [2, 3].
+   *  fill([2, 3], 9) ==> [[9, 9, 9]
+   *                       [9, 9, 9]]
+   *  }
+ * `tf.fill` differs from `tf.constant` in a few ways: + *
    + *
  • + * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + * Tensor values. + *
  • + *
  • + * `tf.fill` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to `tf.constant` which embeds + * the entire Tensor into the graph with a `Const` node. + *
  • + *
  • + * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike `tf.constant`. + * + * @param data type for {@code output()} output + * @param dims 1-D. Represents the shape of the output tensor. + * @param value 0-D (scalar). Value to fill the returned tensor. + *

    + * @compatibility(numpy) Equivalent to np.full + * @end_compatibility + * @return a new instance of Fill */ - public Constant constant(boolean[][][][][] data) { - return Constant.create(scope, data); + public Fill fill(Operand dims, Operand value) { + return Fill.create(scope, dims, value); } /** - * Creates a rank-1 constant of {@code int} elements. + * Generates fingerprint values. + *

    + * Generates fingerprint values of `data`. + *

    + * Fingerprint op considers the first dimension of `data` as the batch dimension, + * and `output[i]` contains the fingerprint value generated from contents in + * `data[i, ...]` for all `i`. + *

    + * Fingerprint op writes fingerprint values as byte arrays. For example, the + * default method `farmhash64` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an `uint8` array of size 8, in little-endian + * order. + *

    + * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + * and that the fingerprint method is `farmhash64`. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers + * in `data[1, :, :]`. + *

    + * Note that this op fingerprints the raw underlying buffer, and it does not + * fingerprint Tensor's metadata such as data type and/or shape. For example, the + * fingerprint values are invariant under reshapes and bitcasts as long as the + * batch dimension remain the same: + *

    {@code
    +   *  Fingerprint(data) == Fingerprint(Reshape(data, ...))
    +   *  Fingerprint(data) == Fingerprint(Bitcast(data, ...))
    +   *  }
    + * For string data, one should expect `Fingerprint(data) != + * Fingerprint(ReduceJoin(data))` in general. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data Must have rank 1 or higher. + * @param method Fingerprint method used by this op. Currently available method is + * `farmhash::fingerprint64`. + * @return a new instance of Fingerprint */ - public Constant constant(int[] data) { - return Constant.create(scope, data); + public Fingerprint fingerprint(Operand data, Operand method) { + return Fingerprint.create(scope, data, method); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Gather slices from `params` axis `axis` according to `indices`. + *

    + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `params.shape[:axis] + indices.shape + + * params.shape[axis + 1:]` where: + *

    {@code
    +   *      # Scalar indices (output is rank(params) - 1).
    +   *      output[a_0, ..., a_n, b_0, ..., b_n] =
    +   *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *      # Vector indices (output is rank(params)).
    +   *      output[a_0, ..., a_n, i, b_0, ..., b_n] =
    +   *        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
    +   *
    +   *      # Higher rank indices (output is rank(params) + rank(indices) - 1).
    +   *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
    +   *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
    +   *  }
    + *
    + * + *
    + *

    + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + *

    + * See also `tf.batch_gather` and `tf.gather_nd`. + * + * @param data type for {@code output()} output + * @param params The tensor from which to gather values. Must be at least rank + * `axis + 1`. + * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. + * @param axis The axis in `params` to gather `indices` from. Defaults to the first + * dimension. Supports negative indexes. + * @param options carries optional attributes values + * @return a new instance of Gather */ - public Constant constant(boolean[][] data) { - return Constant.create(scope, data); + public Gather gather(Operand params, + Operand indices, Operand axis, Gather.Options... options) { + return Gather.create(scope, params, indices, axis, options); } /** - * Creates a rank-1 constant of {@code long} elements. + * Gather slices from `params` into a Tensor with shape specified by `indices`. + *

    + * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into `params`, where each element defines a + * slice of `params`: + *

    + * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + *

    + * Whereas in `tf.gather` `indices` defines slices into the `axis` + * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + * first `N` dimensions of `params`, where `N = indices.shape[-1]`. + *

    + * The last dimension of `indices` can be at most the rank of + * `params`: + *

    + * indices.shape[-1] <= params.rank + *

    + * The last dimension of `indices` corresponds to elements + * (if `indices.shape[-1] == params.rank`) or slices + * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + * of `params`. The output tensor has shape + *

    + * indices.shape[:-1] + params.shape[indices.shape[-1]:] + *

    + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + *

    + * Some examples below. + *

    + * Simple indexing into a matrix: + *

    {@code
    +   *      indices = [[0, 0], [1, 1]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = ['a', 'd']
    +   *  }
    + * Slice indexing into a matrix: + *
    {@code
    +   *      indices = [[1], [0]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [['c', 'd'], ['a', 'b']]
    +   *  }
    + * Indexing into a 3-tensor: + *
    {@code
    +   *      indices = [[1]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[['a1', 'b1'], ['c1', 'd1']]]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *
    +   *      indices = [[0, 1], [1, 0]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [['c0', 'd0'], ['a1', 'b1']]
    +   *
    +   *
    +   *      indices = [[0, 0, 1], [1, 0, 1]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = ['b0', 'b1']
    +   *  }
    + * Batched indexing into a matrix: + *
    {@code
    +   *      indices = [[[0, 0]], [[0, 1]]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [['a'], ['b']]
    +   *  }
    + * Batched slice indexing into a matrix: + *
    {@code
    +   *      indices = [[[1]], [[0]]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [[['c', 'd']], [['a', 'b']]]
    +   *  }
    + * Batched indexing into a 3-tensor: + *
    {@code
    +   *      indices = [[[1]], [[0]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[[['a1', 'b1'], ['c1', 'd1']]],
    +   *                [[['a0', 'b0'], ['c0', 'd0']]]]
    +   *
    +   *      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[['c0', 'd0'], ['a1', 'b1']],
    +   *                [['a0', 'b0'], ['c1', 'd1']]]
    +   *
    +   *
    +   *      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [['b0', 'b1'], ['d0', 'c1']]
    +   *  }
    + * See also `tf.gather` and `tf.batch_gather`. + * + * @param data type for {@code output()} output + * @param params The tensor from which to gather values. + * @param indices Index tensor. + * @return a new instance of GatherNd */ - public Constant constant(long[] data) { - return Constant.create(scope, data); + public GatherNd gatherNd(Operand params, + Operand indices) { + return GatherNd.create(scope, params, indices); } /** - * Creates a rank-2 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Store the input tensor in the state of the current session. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param value The tensor to be stored. + * @return a new instance of GetSessionHandle */ - public Constant constant(byte[][][] data) { - return Constant.create(scope, data); + public GetSessionHandle getSessionHandle(Operand value) { + return GetSessionHandle.create(scope, value); } /** - * Creates a rank-2 constant of {@code float} elements. + * Get the value of the tensor specified by its handle. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code value()} output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @return a new instance of GetSessionTensor */ - public Constant constant(float[][] data) { - return Constant.create(scope, data); + public GetSessionTensor getSessionTensor(Operand handle, + DataType dtype) { + return GetSessionTensor.create(scope, handle, dtype); } /** - * Creates a rank-4 constant of {@code double} elements. 
+ * Adds gradients computation ops to the graph according to scope. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param scope current graph scope + * @param y outputs of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of {@code Gradients} + * @throws IllegalArgumentException if execution environment is not a graph */ - public Constant constant(double[][][][] data) { - return Constant.create(scope, data); + public Gradients gradients(Iterable> y, Iterable> x, + Gradients.Options... options) { + return Gradients.create(scope, y, x, options); } /** - * Creates a constant containing a single {@code float} element. + * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, + * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} + *

    + * If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives of some loss + * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of {@code y}. + *

    + * If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for all + * shapes in {@code y}. + *

    + * The partial derivatives are returned in output {@code dy}, with the size of {@code x}. + *

    + * Example of usage: + *

    {@code
    +   *  Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
    +   *  Scalar alpha = ops.scalar(1.0f);
    +   *  tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
    +   *  tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
    +   *  }
    * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a float constant + * @param y output of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of {@code Gradients} + * @throws IllegalArgumentException if execution environment is not a graph */ - public Constant constant(float data) { - return Constant.create(scope, data); + public Gradients gradients(Operand y, Iterable> x, + Gradients.Options... options) { + return Gradients.create(scope, y, x, options); } /** - * Creates a rank-1 constant of {@code float} elements. + * Gives a guarantee to the TF runtime that the input tensor is a constant. + *

    + * The runtime is then free to make optimizations based on this. + *

    + * Only accepts value typed tensors as inputs and rejects resource variable handles + * as input. + *

    + * Returns the input tensor without modification. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input + * @return a new instance of GuaranteeConst */ - public Constant constant(float[] data) { - return Constant.create(scope, data); + public GuaranteeConst guaranteeConst(Operand input) { + return GuaranteeConst.create(scope, input); } /** - * Creates a rank-4 constant of {@code long} elements. + * Creates a non-initialized hash table. + *

    + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of HashTable */ - public Constant constant(long[][][][] data) { - return Constant.create(scope, data); + public HashTable hashTable(DataType keyDtype, + DataType valueDtype, HashTable.Options... options) { + return HashTable.create(scope, keyDtype, valueDtype, options); } /** - * Creates a constant containing a single {@code double} element. + * Return histogram of values. + *

    + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + *

    {@code
    +   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    +   *  nbins = 5
    +   *  value_range = [0.0, 5.0]
    +   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data The value to put into the new constant.
    -   * @return a double constant
    +   *  with tf.get_default_session() as sess:
    +   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    +   *    variables.global_variables_initializer().run()
    +   *    sess.run(hist) => [2, 1, 1, 0, 2]
    +   *  }
    + * + * @param data type for {@code out()} output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @return a new instance of HistogramFixedWidth */ - public Constant constant(double data) { - return Constant.create(scope, data); + public HistogramFixedWidth histogramFixedWidth(Operand values, + Operand valueRange, Operand nbins) { + return HistogramFixedWidth.create(scope, values, valueRange, nbins); } /** - * Creates a rank-2 constant of {@code int} elements. + * Return histogram of values. + *

    + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + *

    {@code
    +   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    +   *  nbins = 5
    +   *  value_range = [0.0, 5.0]
    +   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *  with tf.get_default_session() as sess:
    +   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    +   *    variables.global_variables_initializer().run()
    +   *    sess.run(hist) => [2, 1, 1, 0, 2]
    +   *  }
    + * + * @param data type for {@code out()} output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param dtype + * @return a new instance of HistogramFixedWidth */ - public Constant constant(int[][] data) { - return Constant.create(scope, data); + public HistogramFixedWidth histogramFixedWidth( + Operand values, Operand valueRange, Operand nbins, DataType dtype) { + return HistogramFixedWidth.create(scope, values, valueRange, nbins, dtype); } /** - * Creates a rank-5 constant of {@code float} elements. + * Return a tensor with the same shape and contents as the input tensor or value. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input + * @return a new instance of Identity */ - public Constant constant(float[][][][][] data) { - return Constant.create(scope, data); + public Identity identity(Operand input) { + return Identity.create(scope, input); } /** - * Creates a rank-5 constant of {@code double} elements. + * Returns a list of tensors with the same shapes and contents as the input + *

    + * tensors. + *

    + * This op can be used to override the gradient for complicated functions. For + * example, suppose y = f(x) and we wish to apply a custom function g for backprop + * such that dx = g(dy). In Python, + *

    {@code
    +   *  with tf.get_default_graph().gradient_override_map(
    +   *      {'IdentityN': 'OverrideGradientWithG'}):
    +   *    y, _ = identity_n([f(x), x])
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _):
    +   *    return [None, g(dy)]  # Do not backprop to f(x).
    +   *  }
    + * @param input + * @return a new instance of IdentityN */ - public Constant constant(double[][][][][] data) { - return Constant.create(scope, data); + public IdentityN identityN(Iterable> input) { + return IdentityN.create(scope, input); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Returns immutable tensor from memory region. + *

    + * The current implementation memmaps the tensor from a file. * - * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data type for {@code tensor()} output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. + * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @return a new instance of ImmutableConst */ - public Constant constant(String data) { - return Constant.create(scope, data); + public ImmutableConst immutableConst(DataType dtype, Shape shape, + String memoryRegionName) { + return ImmutableConst.create(scope, dtype, shape, memoryRegionName); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Table initializer that takes two tensors for keys and values respectively. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param tableHandle Handle to a table which will be initialized. + * @param keys Keys of type Tkey. + * @param values Values of type Tval. + * @return a new instance of InitializeTable */ - public Constant constant(boolean[][][] data) { - return Constant.create(scope, data); + public InitializeTable initializeTable(Operand tableHandle, + Operand keys, Operand values) { + return InitializeTable.create(scope, tableHandle, keys, values); } /** - * Creates a rank-3 constant of {@code float} elements. + * Initializes a table from a text file. + *

    + * It inserts one key-value pair into the table for each line of the file. + * The key and value is extracted from the whole line content, elements from the + * split line based on `delimiter` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by `key_index` and + * `value_index`. + *

    + * - A value of -1 means use the line number(starting from zero), expects `int64`. + * - A value of -2 means use the whole line content, expects `string`. + * - A value >= 0 means use the index (starting at zero) of the split line based + * on `delimiter`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param tableHandle Handle to a table which will be initialized. + * @param filename Filename of a vocabulary text file. + * @param keyIndex Column index in a line to get the table `key` values from. + * @param valueIndex Column index that represents information of a line to get the table + * `value` values from. + * @param options carries optional attributes values + * @return a new instance of InitializeTableFromTextFile */ - public Constant constant(float[][][] data) { - return Constant.create(scope, data); + public InitializeTableFromTextFile initializeTableFromTextFile(Operand tableHandle, + Operand filename, Long keyIndex, Long valueIndex, + InitializeTableFromTextFile.Options... options) { + return InitializeTableFromTextFile.create(scope, tableHandle, filename, keyIndex, valueIndex, options); } /** - * Creates a rank-5 constant of {@code int} elements. + * Adds v into specified rows of x. + *

    + * Computes y = x; y[i, :] += v; return y. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code y()} output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceAdd */ - public Constant constant(int[][][][][] data) { - return Constant.create(scope, data); + public InplaceAdd inplaceAdd(Operand x, Operand i, Operand v) { + return InplaceAdd.create(scope, x, i, v); } /** - * Creates a constant containing a single {@code long} element. + * Subtracts `v` into specified rows of `x`. + *

    + * Computes y = x; y[i, :] -= v; return y. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant + * @param data type for {@code y()} output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceSub */ - public Constant constant(long data) { - return Constant.create(scope, data); + public InplaceSub inplaceSub(Operand x, Operand i, Operand v) { + return InplaceSub.create(scope, x, i, v); } /** - * Create a constant from a Tensor. + * Updates specified rows with values in `v`. + *

    + * Computes `x[i, :] = v; return x`. * - * @param scope is a scope used to add the underlying operation. - * @param tensor a Tensor holding the constant value - * @return a constant of the same data type as `tensor` + * @param data type for {@code y()} output + * @param x A tensor of type `T`. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceUpdate */ - public Constant constant(Tensor tensor) { - return Constant.create(scope, tensor); + public InplaceUpdate inplaceUpdate(Operand x, Operand i, + Operand v) { + return InplaceUpdate.create(scope, x, i, v); } /** - * Creates a {@code String} constant using a specified encoding. + * Checks whether a tensor has been initialized. + *

    + * Outputs boolean scalar indicating whether the tensor has been initialized. * - * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant + * @param ref Should be from a `Variable` node. May be uninitialized. + * @return a new instance of IsVariableInitialized */ - public Constant constant(String data, Charset charset) { - return Constant.create(scope, data, charset); + public IsVariableInitialized isVariableInitialized(Operand ref) { + return IsVariableInitialized.create(scope, ref); } /** - * Create a constant from a Java object. - * - *

    The argument {@code object} is first converted into a Tensor using {@link - * org.tensorflow.Tensor#create(Object)}, so only Objects supported by this method must be - * provided. For example: - * + * Generates values in an interval. + *

    + * A sequence of `num` evenly-spaced values are generated beginning at `start`. + * If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, + * so that the last one is exactly `stop`. + *

    + * For example: *

    {@code
    -   *  Constant.create(scope, new int[]{{1, 2}, {3, 4}}, TInt32.DTYPE); // returns a 2x2 integer matrix
    +   *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
        *  }
    * - * @param scope is a scope used to add the underlying operation. - * @param object a Java object representing the constant. - * @return a constant of type `type` - * @see org.tensorflow.Tensor#create(Object) Tensor.create + * @param data type for {@code output()} output + * @param start 0-D tensor. First entry in the range. + * @param stop 0-D tensor. Last entry in the range. + * @param num 0-D tensor. Number of values to generate. + * @return a new instance of LinSpace */ - public Constant constant(Object object, DataType type) { - return Constant.create(scope, object, type); + public LinSpace linSpace(Operand start, + Operand stop, Operand num) { + return LinSpace.create(scope, start, stop, num); } /** - * Create a {@link TFloat64} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Outputs all keys and values in the table. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a double constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param data type for {@code keys()} output + * @param data type for {@code values()} output + * @param tableHandle Handle to the table. + * @param Tkeys + * @param Tvalues + * @return a new instance of LookupTableExport */ - public Constant constant(long[] shape, DoubleBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableExport lookupTableExport( + Operand tableHandle, DataType Tkeys, DataType Tvalues) { + return LookupTableExport.create(scope, tableHandle, Tkeys, Tvalues); } /** - * Create a {@link TInt32} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Looks up keys in a table, outputs the corresponding values. + *

    + * The tensor `keys` must of the same type as the keys of the table. + * The output `values` is of the type of the table values. + *

    + * The scalar `default_value` is the value output for keys not present in the + * table. It must also be of the same type as the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return an integer constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param data type for {@code values()} output + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param defaultValue + * @return a new instance of LookupTableFind */ - public Constant constant(long[] shape, IntBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableFind lookupTableFind( + Operand tableHandle, Operand keys, Operand defaultValue) { + return LookupTableFind.create(scope, tableHandle, keys, defaultValue); } /** - * Create a {@link TInt64} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Replaces the contents of the table with the specified keys and values. + *

    + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a long constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableImport */ - public Constant constant(long[] shape, LongBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableImport lookupTableImport( + Operand tableHandle, Operand keys, Operand values) { + return LookupTableImport.create(scope, tableHandle, keys, values); } /** - * Create a {@link TFloat32} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Updates the table to associates keys with values. + *

    + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a float constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableInsert */ - public Constant constant(long[] shape, FloatBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableInsert lookupTableInsert( + Operand tableHandle, Operand keys, Operand values) { + return LookupTableInsert.create(scope, tableHandle, keys, values); } /** - * Create a constant with data from the given buffer. - * - *

    Creates a Constant with the provided shape of any type where the constant data has been - * encoded into {@code data} as per the specification of the TensorFlow C - * API. + * Computes the number of elements in the given table. * - * @param scope is a scope used to add the underlying operation. - * @param type the tensor datatype. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a constant of type `type` - * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the - * buffer + * @param tableHandle Handle to the table. + * @return a new instance of LookupTableSize */ - public Constant constant(DataType type, long[] shape, ByteBuffer data) { - return Constant.create(scope, type, shape, data); + public LookupTableSize lookupTableSize(Operand tableHandle) { + return LookupTableSize.create(scope, tableHandle); } /** - * This op consumes a lock created by `MutexLock`. - *

    - * This op exists to consume a tensor created by `MutexLock` (other than - * direct control dependencies). It should be the only that consumes the tensor, - * and will raise an error if it is not. Its only purpose is to keep the - * mutex lock tensor alive until it is consumed by this op. + * Forwards the input to the output. *

    - * NOTE: This operation must run on the same device as its input. This may - * be enforced via the `colocate_with` mechanism. + * This operator represents the loop termination condition used by the + * "pivot" switches of a loop. * - * @param mutexLock A tensor returned by `MutexLock`. - * @return a new instance of ConsumeMutexLock + * @param input A boolean scalar, representing the branch predicate of the Switch op. + * @return a new instance of LoopCond */ - public ConsumeMutexLock consumeMutexLock(Operand mutexLock) { - return ConsumeMutexLock.create(scope, mutexLock); + public LoopCond loopCond(Operand input) { + return LoopCond.create(scope, input); } /** - * Does nothing. Serves as a control trigger for scheduling. - *

    - * Only useful as a placeholder for control edges. + * Op removes all elements in the underlying container. * - * @return a new instance of ControlTrigger + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapClear */ - public ControlTrigger controlTrigger() { - return ControlTrigger.create(scope); + public MapClear mapClear(List> dtypes, MapClear.Options... options) { + return MapClear.create(scope, dtypes, options); } /** - * Increments 'ref' until it reaches 'limit'. + * Op returns the number of incomplete elements in the underlying container. * - * @param data type for {@code output()} output - * @param ref Should be from a scalar `Variable` node. - * @param limit If incrementing ref would bring it above limit, instead generates an - * 'OutOfRange' error. - * @return a new instance of CountUpTo + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapIncompleteSize */ - public CountUpTo countUpTo(Operand ref, Long limit) { - return CountUpTo.create(scope, ref, limit); + public MapIncompleteSize mapIncompleteSize(List> dtypes, + MapIncompleteSize.Options... options) { + return MapIncompleteSize.create(scope, dtypes, options); } /** - * Makes a copy of `x`. + * Op peeks at the values at the specified key. If the + *

    + * underlying container does not contain this key + * this op will block until it does. * - * @param data type for {@code y()} output - * @param x The source tensor of type `T`. - * @return a new instance of DeepCopy + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapPeek */ - public DeepCopy deepCopy(Operand x) { - return DeepCopy.create(scope, x); + public MapPeek mapPeek(Operand key, Operand indices, List> dtypes, + MapPeek.Options... options) { + return MapPeek.create(scope, key, indices, dtypes, options); } /** - * Delete the tensor specified by its handle in the session. + * Op returns the number of elements in the underlying container. * - * @param handle The handle for a tensor stored in the session state. - * @return a new instance of DeleteSessionTensor + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapSize */ - public DeleteSessionTensor deleteSessionTensor(Operand handle) { - return DeleteSessionTensor.create(scope, handle); + public MapSize mapSize(List> dtypes, MapSize.Options... options) { + return MapSize.create(scope, dtypes, options); } /** - * Deletes the resource specified by the handle. - *

    - * All subsequent operations using the resource will result in a NotFound - * error status. + * Stage (key, values) in the underlying container which behaves like a hashtable. * - * @param resource handle to the resource to delete. + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of DestroyResourceOp + * @return a new instance of MapStage */ - public DestroyResourceOp destroyResourceOp(Operand resource, - DestroyResourceOp.Options... options) { - return DestroyResourceOp.create(scope, resource, options); + public MapStage mapStage(Operand key, Operand indices, + Iterable> values, List> dtypes, MapStage.Options... options) { + return MapStage.create(scope, key, indices, values, dtypes, options); } /** - * Destroys the temporary variable and returns its final value. + * Op removes and returns the values associated with the key *

    - * Sets output to the value of the Tensor pointed to by 'ref', then destroys - * the temporary variable called 'var_name'. - * All other uses of 'ref' must have executed before this op. - * This is typically achieved by chaining the ref through each assign op, or by - * using control dependencies. - *

    - * Outputs the final value of the tensor pointed to by 'ref'. + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. * - * @param data type for {@code value()} output - * @param ref A reference to the temporary variable tensor. - * @param varName Name of the temporary variable, usually the name of the matching - * 'TemporaryVariable' op. - * @return a new instance of DestroyTemporaryVariable + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstage */ - public DestroyTemporaryVariable destroyTemporaryVariable(Operand ref, - String varName) { - return DestroyTemporaryVariable.create(scope, ref, varName); + public MapUnstage mapUnstage(Operand key, Operand indices, + List> dtypes, MapUnstage.Options... options) { + return MapUnstage.create(scope, key, indices, dtypes, options); } /** - * Partitions `data` into `num_partitions` tensors using indices from `partitions`. - *

    - * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` - * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` - * are placed in `outputs[i]` in lexicographic order of `js`, and the first - * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. - * In detail, - *

    {@code
    -   *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
    -   *
    -   *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
    -   *  }
    - * `data.shape` must start with `partitions.shape`. - *

    - * For example: - *

    {@code
    -   *      # Scalar partitions.
    -   *      partitions = 1
    -   *      num_partitions = 2
    -   *      data = [10, 20]
    -   *      outputs[0] = []  # Empty with shape [0, 2]
    -   *      outputs[1] = [[10, 20]]
    -   *
    -   *      # Vector partitions.
    -   *      partitions = [0, 0, 1, 1, 0]
    -   *      num_partitions = 2
    -   *      data = [10, 20, 30, 40, 50]
    -   *      outputs[0] = [10, 20, 50]
    -   *      outputs[1] = [30, 40]
    -   *  }
    - * See `dynamic_stitch` for an example on how to merge partitions back. + * Op removes and returns a random (key, value) *

    - *

    - * - *
    + * from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. * - * @param data type for {@code outputs()} output - * @param data - * @param partitions Any shape. Indices in the range `[0, num_partitions)`. - * @param numPartitions The number of partitions to output. - * @return a new instance of DynamicPartition + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstageNoKey */ - public DynamicPartition dynamicPartition(Operand data, - Operand partitions, Long numPartitions) { - return DynamicPartition.create(scope, data, partitions, numPartitions); + public MapUnstageNoKey mapUnstageNoKey(Operand indices, List> dtypes, + MapUnstageNoKey.Options... options) { + return MapUnstageNoKey.create(scope, indices, dtypes, options); } /** - * Interleave the values from the `data` tensors into a single tensor. - *

    - * Builds a merged tensor such that - *

    {@code
    -   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
    -   *  }
    - * For example, if each `indices[m]` is scalar or vector, we have - *
    {@code
    -   *      # Scalar indices:
    -   *      merged[indices[m], ...] = data[m][...]
    -   *
    -   *      # Vector indices:
    -   *      merged[indices[m][i], ...] = data[m][i, ...]
    -   *  }
    - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. In terms of this - * `constant`, the output shape is - *

    - * merged.shape = [max(indices)] + constant - *

    - * Values are merged in order, so if an index appears in both `indices[m][i]` and - * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the - * merged result. If you do not need this guarantee, ParallelDynamicStitch might - * perform better on some devices. + * Computes the maximum of elements across dimensions of a tensor. *

    - * For example: - *

    {@code
    -   *      indices[0] = 6
    -   *      indices[1] = [4, 1]
    -   *      indices[2] = [[5, 2], [0, 3]]
    -   *      data[0] = [61, 62]
    -   *      data[1] = [[41, 42], [11, 12]]
    -   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
    -   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
    -   *                [51, 52], [61, 62]]
    -   *  }
    - * This method can be used to merge partitions created by `dynamic_partition` - * as illustrated on the following example: - *
    {@code
    -   *      # Apply function (increments x_i) on elements for which a certain condition
    -   *      # apply (x_i != -1 in this example).
    -   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    -   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
    -   *      partitioned_data = tf.dynamic_partition(
    -   *          x, tf.cast(condition_mask, tf.int32) , 2)
    -   *      partitioned_data[1] = partitioned_data[1] + 1.0
    -   *      condition_indices = tf.dynamic_partition(
    -   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
    -   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
    -   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
    -   *      # unchanged.
    -   *  }
    - *
    - * - *
    + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code merged()} output - * @param indices - * @param data - * @return a new instance of DynamicStitch + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Max */ - public DynamicStitch dynamicStitch(Iterable> indices, - Iterable> data) { - return DynamicStitch.create(scope, indices, data); + public Max max(Operand input, Operand axis, + Max.Options... options) { + return Max.create(scope, input, axis, options); } /** - * Computes the (possibly normalized) Levenshtein Edit Distance. + * Forwards the value of an available tensor from `inputs` to `output`. *

    - * The inputs are variable-length sequences provided by SparseTensors - * (hypothesis_indices, hypothesis_values, hypothesis_shape) - * and - * (truth_indices, truth_values, truth_shape). + * `Merge` waits for at least one of the tensors in `inputs` to become available. + * It is usually combined with `Switch` to implement branching. *

    - * The inputs are: + * `Merge` forwards the first tensor to become available to `output`, and sets + * `value_index` to its index in `inputs`. * - * @param hypothesisIndices The indices of the hypothesis list SparseTensor. - * This is an N x R int64 matrix. - * @param hypothesisValues The values of the hypothesis list SparseTensor. - * This is an N-length vector. - * @param hypothesisShape The shape of the hypothesis list SparseTensor. - * This is an R-length vector. - * @param truthIndices The indices of the truth list SparseTensor. - * This is an M x R int64 matrix. - * @param truthValues The values of the truth list SparseTensor. - * This is an M-length vector. - * @param truthShape truth indices, vector. - * @param options carries optional attributes values - * @return a new instance of EditDistance + * @param data type for {@code output()} output + * @param inputs The input tensors, exactly one of which will become available. + * @return a new instance of Merge */ - public EditDistance editDistance(Operand hypothesisIndices, - Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, - Operand truthValues, Operand truthShape, EditDistance.Options... options) { - return EditDistance.create(scope, hypothesisIndices, hypothesisValues, hypothesisShape, truthIndices, truthValues, truthShape, options); + public Merge merge(Iterable> inputs) { + return Merge.create(scope, inputs); } /** - * Creates a tensor with the given shape. + * Computes the minimum of elements across dimensions of a tensor. *

    - * This operation creates a tensor of `shape` and `dtype`. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * * @param data type for {@code output()} output - * @param shape 1-D. Represents the shape of the output tensor. - * @param dtype + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of Empty + * @return a new instance of Min */ - public Empty empty(Operand shape, DataType dtype, - Empty.Options... options) { - return Empty.create(scope, shape, dtype, options); + public Min min(Operand input, Operand axis, + Min.Options... options) { + return Min.create(scope, input, axis, options); } /** - * Creates and returns an empty tensor list. + * Pads a tensor with mirrored values. *

    - * All list elements must be tensors of dtype element_dtype and shape compatible - * with element_shape. + * This operation pads a `input` with mirrored values according to the `paddings` + * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many values to add before the contents of `input` in that dimension, and + * `paddings[D, 1]` indicates how many values to add after the contents of `input` + * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater + * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + * (if false, respectively). *

    - * handle: an empty tensor list. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * The padded size of each dimension D of the output is: + *

    + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + *

    + * For example: + *

    {@code
    +   *  # 't' is [[1, 2, 3], [4, 5, 6]].
    +   *  # 'paddings' is [[1, 1]], [2, 2]].
    +   *  # 'mode' is SYMMETRIC.
    +   *  # rank of 't' is 2.
    +   *  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
    +   *                        [2, 1, 1, 2, 3, 3, 2]
    +   *                        [5, 4, 4, 5, 6, 6, 5]
    +   *                        [5, 4, 4, 5, 6, 6, 5]]
    +   *  }
    * - * @param elementShape - * @param maxNumElements - * @param elementDtype - * @return a new instance of EmptyTensorList + * @param data type for {@code output()} output + * @param input The input tensor to be padded. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * do not include the borders, while in symmetric mode the padded regions + * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * @return a new instance of MirrorPad */ - public EmptyTensorList emptyTensorList( - Operand elementShape, Operand maxNumElements, DataType elementDtype) { - return EmptyTensorList.create(scope, elementShape, maxNumElements, elementDtype); + public MirrorPad mirrorPad(Operand input, + Operand paddings, String mode) { + return MirrorPad.create(scope, input, paddings, mode); } /** - * Ensures that the tensor's shape matches the expected shape. + * Wraps an arbitrary MLIR computation expressed as a module with a main() function. *

    - * Raises an error if the input tensor's shape does not match the specified shape. - * Returns the input tensor otherwise. - * - * @param data type for {@code output()} output - * @param input A tensor, whose shape is to be validated. - * @param shape The expected (possibly partially specified) shape of the input tensor. - * @return a new instance of EnsureShape + * This operation does not have an associated kernel and is not intended to be + * executed in a regular TensorFlow session. Instead it is intended to be used for + * testing or for special case where a user intends to pass custom MLIR computation + * through a TensorFlow graph with the intent of having custom tooling processing + * it downstream (when targeting a different environment, like TensorFlow lite for + * example). + * The MLIR module is expected to have a main() function that will be used as an + * entry point. The inputs to the operations will be passed as argument to the + * main() function and the returned values of the main function mapped to the + * outputs. + * Example usage: + *

    {@code
    +   *  import tensorflow as tf
    +   *  from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
    +   *
    +   *  mlir_module = '''python
    +   *  func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
    +   *     %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
    +   *     return %ret : tensor<10x10xf32>
    +   *  }
    +   *  '''
    +   *
    +   * @tf.function def foo(x, y):
    +   *    return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
    +   *
    +   *  graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
    +   *  }
    + * @param inputs + * @param mlirModule + * @param Toutputs + * @return a new instance of MlirPassthroughOp */ - public EnsureShape ensureShape(Operand input, Shape shape) { - return EnsureShape.create(scope, input, shape); + public MlirPassthroughOp mlirPassthroughOp(Iterable> inputs, String mlirModule, + List> Toutputs) { + return MlirPassthroughOp.create(scope, inputs, mlirModule, Toutputs); } /** - * Inserts a dimension of 1 into a tensor's shape. - *

    - * Given a tensor `input`, this operation inserts a dimension of 1 at the - * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at - * zero; if you specify a negative number for `axis` it is counted backward from - * the end. - *

    - * This operation is useful if you want to add a batch dimension to a single - * element. For example, if you have a single image of shape `[height, width, - * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, - * which will make the shape `[1, height, width, channels]`. - *

    - * Other examples: - *

    {@code
    -   *  # 't' is a tensor of shape [2]
    -   *  shape(expand_dims(t, 0)) ==> [1, 2]
    -   *  shape(expand_dims(t, 1)) ==> [2, 1]
    -   *  shape(expand_dims(t, -1)) ==> [2, 1]
    -   *
    -   *  # 't2' is a tensor of shape [2, 3, 5]
    -   *  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
    -   *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
    -   *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
    -   *  }
    - * This operation requires that: + * Creates an empty hash table that uses tensors as the backing store. *

    - * `-1-input.dims() <= dim <= input.dims()` + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. *

    - * This operation is related to `squeeze()`, which removes dimensions of - * size 1. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. * - * @param data type for {@code output()} output - * @param input - * @param axis 0-D (scalar). Specifies the dimension index at which to - * expand the shape of `input`. Must be in the range - * `[-rank(input) - 1, rank(input)]`. - * @return a new instance of ExpandDims + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableDenseHashTable */ - public ExpandDims expandDims(Operand input, - Operand axis) { - return ExpandDims.create(scope, input, axis); + public MutableDenseHashTable mutableDenseHashTable( + Operand emptyKey, Operand deletedKey, DataType valueDtype, + MutableDenseHashTable.Options... options) { + return MutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); } /** - * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. - * - * @param data type for {@code patches()} output - * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of `input`. - * @param strides 1-D of length 5. How far the centers of two consecutive patches are in - * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. - * @param padding The type of padding algorithm to use. + * Creates an empty hash table. *

    - * We specify the size-related attributes as: - *

    {@code
    -   *        ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
    -   *        strides = [1, stride_planes, strides_rows, strides_cols, 1]
    -   *  }
    - * @return a new instance of ExtractVolumePatches + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTable */ - public ExtractVolumePatches extractVolumePatches(Operand input, - List ksizes, List strides, String padding) { - return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); + public MutableHashTable mutableHashTable(DataType keyDtype, + DataType valueDtype, MutableHashTable.Options... options) { + return MutableHashTable.create(scope, keyDtype, valueDtype, options); } /** - * Creates a tensor filled with a scalar value. - *

    - * This operation creates a tensor of shape `dims` and fills it with `value`. + * Creates an empty hash table. *

    - * For example: - *

    {@code
    -   *  # Output tensor has shape [2, 3].
    -   *  fill([2, 3], 9) ==> [[9, 9, 9]
    -   *                       [9, 9, 9]]
    -   *  }
    - * `tf.fill` differs from `tf.constant` in a few ways: - *
      - *
    • - * `tf.fill` only supports scalar contents, whereas `tf.constant` supports - * Tensor values. - *
    • - *
    • - * `tf.fill` creates an Op in the computation graph that constructs the actual - * Tensor value at runtime. This is in contrast to `tf.constant` which embeds - * the entire Tensor into the graph with a `Const` node. - *
    • - *
    • - * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes - * based on other runtime Tensors, unlike `tf.constant`. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. * - * @param data type for {@code output()} output - * @param dims 1-D. Represents the shape of the output tensor. - * @param value 0-D (scalar). Value to fill the returned tensor. - *

      - * @compatibility(numpy) Equivalent to np.full - * @end_compatibility - * @return a new instance of Fill + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTableOfTensors */ - public Fill fill(Operand dims, Operand value) { - return Fill.create(scope, dims, value); + public MutableHashTableOfTensors mutableHashTableOfTensors( + DataType keyDtype, DataType valueDtype, MutableHashTableOfTensors.Options... options) { + return MutableHashTableOfTensors.create(scope, keyDtype, valueDtype, options); } /** - * Generates fingerprint values. - *

      - * Generates fingerprint values of `data`. - *

      - * Fingerprint op considers the first dimension of `data` as the batch dimension, - * and `output[i]` contains the fingerprint value generated from contents in - * `data[i, ...]` for all `i`. - *

      - * Fingerprint op writes fingerprint values as byte arrays. For example, the - * default method `farmhash64` generates a 64-bit fingerprint value at a time. - * This 8-byte value is written out as an `uint8` array of size 8, in little-endian - * order. - *

      - * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), - * and that the fingerprint method is `farmhash64`. In this case, the output shape - * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of - * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in - * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers - * in `data[1, :, :]`. - *

      - * Note that this op fingerprints the raw underlying buffer, and it does not - * fingerprint Tensor's metadata such as data type and/or shape. For example, the - * fingerprint values are invariant under reshapes and bitcasts as long as the - * batch dimension remain the same: - *

      {@code
      -   *  Fingerprint(data) == Fingerprint(Reshape(data, ...))
      -   *  Fingerprint(data) == Fingerprint(Bitcast(data, ...))
      -   *  }
      - * For string data, one should expect `Fingerprint(data) != - * Fingerprint(ReduceJoin(data))` in general. + * Creates a Mutex resource that can be locked by `MutexLock`. * - * @param data Must have rank 1 or higher. - * @param method Fingerprint method used by this op. Currently available method is - * `farmhash::fingerprint64`. - * @return a new instance of Fingerprint + * @param options carries optional attributes values + * @return a new instance of Mutex */ - public Fingerprint fingerprint(Operand data, Operand method) { - return Fingerprint.create(scope, data, method); + public Mutex mutex(Mutex.Options... options) { + return Mutex.create(scope, options); } /** - * Gather slices from `params` axis `axis` according to `indices`. + * Locks a mutex resource. The output is the lock. So long as the lock tensor *

      - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `params.shape[:axis] + indices.shape + - * params.shape[axis + 1:]` where: + * is alive, any other request to use `MutexLock` with this mutex will wait. + *

      + * This is particularly useful for creating a critical section when used in + * conjunction with `MutexLockIdentity`: *

      {@code
      -   *      # Scalar indices (output is rank(params) - 1).
      -   *      output[a_0, ..., a_n, b_0, ..., b_n] =
      -   *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
      +   *  mutex = mutex_v2(
      +   *    shared_name=handle_name, container=container, name=name)
          *
      -   *      # Vector indices (output is rank(params)).
      -   *      output[a_0, ..., a_n, i, b_0, ..., b_n] =
      -   *        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
      +   *  def execute_in_critical_section(fn, *args, **kwargs):
      +   *    lock = gen_resource_variable_ops.mutex_lock(mutex)
          *
      -   *      # Higher rank indices (output is rank(params) + rank(indices) - 1).
      -   *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
      -   *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
      +   *    with ops.control_dependencies([lock]):
      +   *      r = fn(*args, **kwargs)
      +   *
      +   *    with ops.control_dependencies(nest.flatten(r)):
      +   *      with ops.colocate_with(mutex):
      +   *        ensure_lock_exists = mutex_lock_identity(lock)
      +   *
      +   *      # Make sure that if any element of r is accessed, all of
      +   *      # them are executed together.
      +   *      r = nest.map_structure(tf.identity, r)
      +   *
      +   *    with ops.control_dependencies([ensure_lock_exists]):
      +   *      return nest.map_structure(tf.identity, r)
          *  }
      - *
      - * - *
      + * While `fn` is running in the critical section, no other functions which wish to + * use this critical section may run. *

      - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + * Often the use case is that two executions of the same graph, in parallel, + * wish to run `fn`; and we wish to ensure that only one of them executes + * at a time. This is especially important if `fn` modifies one or more + * variables at a time. *

      - * See also `tf.batch_gather` and `tf.gather_nd`. + * It is also useful if two separate functions must share a resource, but we + * wish to ensure the usage is exclusive. + * + * @param mutex The mutex resource to lock. + * @return a new instance of MutexLock + */ + public MutexLock mutexLock(Operand mutex) { + return MutexLock.create(scope, mutex); + } + + /** + * Makes its input available to the next iteration. * * @param data type for {@code output()} output - * @param params The tensor from which to gather values. Must be at least rank - * `axis + 1`. - * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. - * @param axis The axis in `params` to gather `indices` from. Defaults to the first - * dimension. Supports negative indexes. - * @param options carries optional attributes values - * @return a new instance of Gather + * @param data The tensor to be made available to the next iteration. + * @return a new instance of NextIteration */ - public Gather gather(Operand params, - Operand indices, Operand axis, Gather.Options... options) { - return Gather.create(scope, params, indices, axis, options); + public NextIteration nextIteration(Operand data) { + return NextIteration.create(scope, data); } /** - * Gather slices from `params` into a Tensor with shape specified by `indices`. - *

      - * `indices` is a K-dimensional integer tensor, best thought of as a - * (K-1)-dimensional tensor of indices into `params`, where each element defines a - * slice of `params`: - *

      - * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] - *

      - * Whereas in `tf.gather` `indices` defines slices into the `axis` - * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the - * first `N` dimensions of `params`, where `N = indices.shape[-1]`. - *

      - * The last dimension of `indices` can be at most the rank of - * `params`: - *

      - * indices.shape[-1] <= params.rank + * Does nothing. Only useful as a placeholder for control edges. + * + * @return a new instance of NoOp + */ + public NoOp noOp() { + return NoOp.create(scope); + } + + /** + * Returns a one-hot tensor. *

      - * The last dimension of `indices` corresponds to elements - * (if `indices.shape[-1] == params.rank`) or slices - * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` - * of `params`. The output tensor has shape + * The locations represented by indices in `indices` take value `on_value`, + * while all other locations take value `off_value`. *

      - * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * If the input `indices` is rank `N`, the output will have rank `N+1`, + * The new axis is created at dimension `axis` (default: the new axis is + * appended at the end). *

      - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + * If `indices` is a scalar the output shape will be a vector of length `depth`. *

      - * Some examples below. + * If `indices` is a vector of length `features`, the output shape will be: + *

      {@code
      +   *    features x depth if axis == -1
      +   *    depth x features if axis == 0
      +   *  }
      + * If `indices` is a matrix (batch) with shape `[batch, features]`, + * the output shape will be: + *
      {@code
      +   *    batch x features x depth if axis == -1
      +   *    batch x depth x features if axis == 1
      +   *    depth x batch x features if axis == 0
      +   *  }
      + * Examples + * ========= *

      - * Simple indexing into a matrix: + * Suppose that *

      {@code
      -   *      indices = [[0, 0], [1, 1]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = ['a', 'd']
      +   *    indices = [0, 2, -1, 1]
      +   *    depth = 3
      +   *    on_value = 5.0
      +   *    off_value = 0.0
      +   *    axis = -1
          *  }
      - * Slice indexing into a matrix: + * Then output is `[4 x 3]`: *
      {@code
      -   *      indices = [[1], [0]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [['c', 'd'], ['a', 'b']]
      +   *  output =
      +   *    [5.0 0.0 0.0]  // one_hot(0)
      +   *    [0.0 0.0 5.0]  // one_hot(2)
      +   *    [0.0 0.0 0.0]  // one_hot(-1)
      +   *    [0.0 5.0 0.0]  // one_hot(1)
          *  }
      - * Indexing into a 3-tensor: + * Suppose that *
      {@code
      -   *      indices = [[1]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[['a1', 'b1'], ['c1', 'd1']]]
      -   *
      -   *
      -   *      indices = [[0, 1], [1, 0]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [['c0', 'd0'], ['a1', 'b1']]
      -   *
      -   *
      -   *      indices = [[0, 0, 1], [1, 0, 1]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = ['b0', 'b1']
      +   *    indices = [0, 2, -1, 1]
      +   *    depth = 3
      +   *    on_value = 0.0
      +   *    off_value = 3.0
      +   *    axis = 0
          *  }
      - * Batched indexing into a matrix: + * Then output is `[3 x 4]`: *
      {@code
      -   *      indices = [[[0, 0]], [[0, 1]]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [['a'], ['b']]
      +   *  output =
      +   *    [0.0 3.0 3.0 3.0]
      +   *    [3.0 3.0 3.0 0.0]
      +   *    [3.0 3.0 3.0 3.0]
      +   *    [3.0 0.0 3.0 3.0]
      +   *  //  ^                one_hot(0)
      +   *  //      ^            one_hot(2)
      +   *  //          ^        one_hot(-1)
      +   *  //              ^    one_hot(1)
          *  }
      - * Batched slice indexing into a matrix: + * Suppose that *
      {@code
      -   *      indices = [[[1]], [[0]]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [[['c', 'd']], [['a', 'b']]]
      +   *    indices = [[0, 2], [1, -1]]
      +   *    depth = 3
      +   *    on_value = 1.0
      +   *    off_value = 0.0
      +   *    axis = -1
          *  }
      - * Batched indexing into a 3-tensor: + * Then output is `[2 x 2 x 3]`: *
      {@code
      -   *      indices = [[[1]], [[0]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[[['a1', 'b1'], ['c1', 'd1']]],
      -   *                [[['a0', 'b0'], ['c0', 'd0']]]]
      -   *
      -   *      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[['c0', 'd0'], ['a1', 'b1']],
      -   *                [['a0', 'b0'], ['c1', 'd1']]]
      -   *
      -   *
      -   *      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [['b0', 'b1'], ['d0', 'c1']]
      +   *  output =
      +   *    [
      +   *      [1.0, 0.0, 0.0]  // one_hot(0)
      +   *      [0.0, 0.0, 1.0]  // one_hot(2)
      +   *    ][
      +   *      [0.0, 1.0, 0.0]  // one_hot(1)
      +   *      [0.0, 0.0, 0.0]  // one_hot(-1)
      +   *    ]
          *  }
      - * See also `tf.gather` and `tf.batch_gather`. * - * @param data type for {@code output()} output - * @param params The tensor from which to gather values. - * @param indices Index tensor. - * @return a new instance of GatherNd + * @param data type for {@code output()} output + * @param indices A tensor of indices. + * @param depth A scalar defining the depth of the one hot dimension. + * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param options carries optional attributes values + * @return a new instance of OneHot */ - public GatherNd gatherNd(Operand params, - Operand indices) { - return GatherNd.create(scope, params, indices); + public OneHot oneHot(Operand indices, + Operand depth, Operand onValue, Operand offValue, OneHot.Options... options) { + return OneHot.create(scope, indices, depth, onValue, offValue, options); } /** - * Store the input tensor in the state of the current session. + * Returns a tensor of ones with the same shape and type as x. * - * @param value The tensor to be stored. - * @return a new instance of GetSessionHandle + * @param data type for {@code y()} output + * @param x a tensor of type T. + * @return a new instance of OnesLike */ - public GetSessionHandle getSessionHandle(Operand value) { - return GetSessionHandle.create(scope, value); + public OnesLike onesLike(Operand x) { + return OnesLike.create(scope, x); } /** - * Get the value of the tensor specified by its handle. + * Op removes all elements in the underlying container. * - * @param data type for {@code value()} output - * @param handle The handle for a tensor stored in the session state. - * @param dtype The type of the output value. 
- * @return a new instance of GetSessionTensor + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapClear */ - public GetSessionTensor getSessionTensor(Operand handle, - DataType dtype) { - return GetSessionTensor.create(scope, handle, dtype); + public OrderedMapClear orderedMapClear(List> dtypes, + OrderedMapClear.Options... options) { + return OrderedMapClear.create(scope, dtypes, options); } /** - * Adds gradients computation ops to the graph according to scope. + * Op returns the number of incomplete elements in the underlying container. * - * @param scope current graph scope - * @param y outputs of the function to derive - * @param x inputs of the function for which partial derivatives are computed + * @param dtypes * @param options carries optional attributes values - * @return a new instance of {@code Gradients} - * @throws IllegalArgumentException if execution environment is not a graph + * @return a new instance of OrderedMapIncompleteSize */ - public Gradients gradients(Iterable> y, Iterable> x, - Gradients.Options... options) { - return Gradients.create(scope, y, x, options); + public OrderedMapIncompleteSize orderedMapIncompleteSize(List> dtypes, + OrderedMapIncompleteSize.Options... options) { + return OrderedMapIncompleteSize.create(scope, dtypes, options); } /** - * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, - * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} - *

      - * If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives of some loss - * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of {@code y}. - *

      - * If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for all - * shapes in {@code y}. - *

      - * The partial derivatives are returned in output {@code dy}, with the size of {@code x}. + * Op peeks at the values at the specified key. If the *

      - * Example of usage: - *

      {@code
      -   *  Gradients gradients = Gradients.create(scope, Arrays.asList(loss), Arrays.asList(w, b));
      -   *
      -   *  Constant alpha = ops.constant(1.0f, Float.class);
      -   *  ApplyGradientDescent.create(scope, w, alpha, gradients.dy(0));
      -   *  ApplyGradientDescent.create(scope, b, alpha, gradients.dy(1));
      -   *  }
      + * underlying container does not contain this key + * this op will block until it does. This Op is optimized for + * performance. * - * @param y output of the function to derive - * @param x inputs of the function for which partial derivatives are computed + * @param key + * @param indices + * @param dtypes * @param options carries optional attributes values - * @return a new instance of {@code Gradients} - * @throws IllegalArgumentException if execution environment is not a graph + * @return a new instance of OrderedMapPeek */ - public Gradients gradients(Operand y, Iterable> x, - Gradients.Options... options) { - return Gradients.create(scope, y, x, options); + public OrderedMapPeek orderedMapPeek(Operand key, Operand indices, + List> dtypes, OrderedMapPeek.Options... options) { + return OrderedMapPeek.create(scope, key, indices, dtypes, options); } /** - * Gives a guarantee to the TF runtime that the input tensor is a constant. - *

      - * The runtime is then free to make optimizations based on this. - *

      - * Only accepts value typed tensors as inputs and rejects resource variable handles - * as input. - *

      - * Returns the input tensor without modification. + * Op returns the number of elements in the underlying container. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of GuaranteeConst + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapSize */ - public GuaranteeConst guaranteeConst(Operand input) { - return GuaranteeConst.create(scope, input); + public OrderedMapSize orderedMapSize(List> dtypes, + OrderedMapSize.Options... options) { + return OrderedMapSize.create(scope, dtypes, options); } /** - * Creates a non-initialized hash table. + * Stage (key, values) in the underlying container which behaves like a ordered *

      - * This op creates a hash table, specifying the type of its keys and values. - * Before using the table you will have to initialize it. After initialization the - * table will be immutable. + * associative container. Elements are ordered by key. * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of HashTable + * @return a new instance of OrderedMapStage */ - public HashTable hashTable(DataType keyDtype, - DataType valueDtype, HashTable.Options... options) { - return HashTable.create(scope, keyDtype, valueDtype, options); + public OrderedMapStage orderedMapStage(Operand key, Operand indices, + Iterable> values, List> dtypes, OrderedMapStage.Options... options) { + return OrderedMapStage.create(scope, key, indices, values, dtypes, options); } /** - * Return histogram of values. + * Op removes and returns the values associated with the key *

      - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - *

      {@code
      -   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
      -   *  nbins = 5
      -   *  value_range = [0.0, 5.0]
      -   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
      -   *
      -   *  with tf.get_default_session() as sess:
      -   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
      -   *    variables.global_variables_initializer().run()
      -   *    sess.run(hist) => [2, 1, 1, 0, 2]
      -   *  }
      + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. * - * @param data type for {@code out()} output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @return a new instance of HistogramFixedWidth + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstage */ - public HistogramFixedWidth histogramFixedWidth(Operand values, - Operand valueRange, Operand nbins) { - return HistogramFixedWidth.create(scope, values, valueRange, nbins); + public OrderedMapUnstage orderedMapUnstage(Operand key, Operand indices, + List> dtypes, OrderedMapUnstage.Options... options) { + return OrderedMapUnstage.create(scope, key, indices, dtypes, options); } /** - * Return histogram of values. + * Op removes and returns the (key, value) element with the smallest *

      - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - *

      {@code
      -   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
      -   *  nbins = 5
      -   *  value_range = [0.0, 5.0]
      -   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
      -   *
      -   *  with tf.get_default_session() as sess:
      -   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
      -   *    variables.global_variables_initializer().run()
      -   *    sess.run(hist) => [2, 1, 1, 0, 2]
      -   *  }
      + * key from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. * - * @param data type for {@code out()} output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype - * @return a new instance of HistogramFixedWidth + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstageNoKey */ - public HistogramFixedWidth histogramFixedWidth( - Operand values, Operand valueRange, Operand nbins, DataType dtype) { - return HistogramFixedWidth.create(scope, values, valueRange, nbins, dtype); + public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, + List> dtypes, OrderedMapUnstageNoKey.Options... options) { + return OrderedMapUnstageNoKey.create(scope, indices, dtypes, options); } /** - * Return a tensor with the same shape and contents as the input tensor or value. + * Pads a tensor. + *

      + * This operation pads `input` according to the `paddings` and `constant_values` + * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many padding values to add before the contents of `input` in that dimension, + * and `paddings[D, 1]` indicates how many padding values to add after the contents + * of `input` in that dimension. `constant_values` is a scalar tensor of the same + * type as `input` that indicates the value to use for padding `input`. + *

      + * The padded size of each dimension D of the output is: + *

      + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + *

      + * For example: + *

      {@code
      +   *  # 't' is [[1, 1], [2, 2]]
      +   *  # 'paddings' is [[1, 1], [2, 2]]
      +   *  # 'constant_values' is 0
      +   *  # rank of 't' is 2
      +   *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
      +   *                        [0, 0, 1, 1, 0, 0]
      +   *                        [0, 0, 2, 2, 0, 0]
      +   *                        [0, 0, 0, 0, 0, 0]]
      +   *  }
      * * @param data type for {@code output()} output * @param input - * @return a new instance of Identity + * @param paddings + * @param constantValues + * @return a new instance of Pad */ - public Identity identity(Operand input) { - return Identity.create(scope, input); + public Pad pad(Operand input, Operand paddings, + Operand constantValues) { + return Pad.create(scope, input, paddings, constantValues); } /** - * Returns a list of tensors with the same shapes and contents as the input + * Concatenates a list of `N` tensors along the first dimension. *

      - * tensors. + * The input tensors are all required to have size 1 in the first dimension. *

      - * This op can be used to override the gradient for complicated functions. For - * example, suppose y = f(x) and we wish to apply a custom function g for backprop - * such that dx = g(dy). In Python, + * For example: *

      {@code
      -   *  with tf.get_default_graph().gradient_override_map(
      -   *      {'IdentityN': 'OverrideGradientWithG'}):
      -   *    y, _ = identity_n([f(x), x])
      -   *
      -   * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _):
      -   *    return [None, g(dy)]  # Do not backprop to f(x).
      +   *  # 'x' is [[1, 4]]
      +   *  # 'y' is [[2, 5]]
      +   *  # 'z' is [[3, 6]]
      +   *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
          *  }
      - * @param input - * @return a new instance of IdentityN - */ - public IdentityN identityN(Iterable> input) { - return IdentityN.create(scope, input); - } - - /** - * Returns immutable tensor from memory region. - *

      - * The current implementation memmaps the tensor from a file. + * The difference between concat and parallel_concat is that concat requires all + * of the inputs be computed before the operation will begin but doesn't require + * that the input shapes be known during graph construction. Parallel concat + * will copy pieces of the input into the output as they become available, in + * some situations this can provide a performance benefit. * - * @param data type for {@code tensor()} output - * @param dtype Type of the returned tensor. - * @param shape Shape of the returned tensor. - * @param memoryRegionName Name of readonly memory region used by the tensor, see - * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. - * @return a new instance of ImmutableConst + * @param data type for {@code output()} output + * @param values Tensors to be concatenated. All must have size 1 in the first dimension + * and same shape. + * @param shape the final shape of the result; should be equal to the shapes of any input + * but with the number of input values in the first dimension. + * @return a new instance of ParallelConcat */ - public ImmutableConst immutableConst(DataType dtype, Shape shape, - String memoryRegionName) { - return ImmutableConst.create(scope, dtype, shape, memoryRegionName); + public ParallelConcat parallelConcat(Iterable> values, + Shape shape) { + return ParallelConcat.create(scope, values, shape); } /** - * Table initializer that takes two tensors for keys and values respectively. + * Interleave the values from the `data` tensors into a single tensor. + *

      + * Builds a merged tensor such that + *

      {@code
      +   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
      +   *  }
      + * For example, if each `indices[m]` is scalar or vector, we have + *
      {@code
      +   *      # Scalar indices:
      +   *      merged[indices[m], ...] = data[m][...]
          *
      -   * @param tableHandle Handle to a table which will be initialized.
      -   * @param keys Keys of type Tkey.
      -   * @param values Values of type Tval.
      -   * @return a new instance of InitializeTable
      -   */
      -  public  InitializeTable initializeTable(Operand tableHandle,
      -      Operand keys, Operand values) {
      -    return InitializeTable.create(scope, tableHandle, keys, values);
      -  }
      -
      -  /**
      -   * Initializes a table from a text file.
      +   *      # Vector indices:
      +   *      merged[indices[m][i], ...] = data[m][i, ...]
      +   *  }
      + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is *

      - * It inserts one key-value pair into the table for each line of the file. - * The key and value is extracted from the whole line content, elements from the - * split line based on `delimiter` or the line number (starting from zero). - * Where to extract the key and value from a line is specified by `key_index` and - * `value_index`. + * merged.shape = [max(indices)] + constant *

      - * - A value of -1 means use the line number(starting from zero), expects `int64`. - * - A value of -2 means use the whole line content, expects `string`. - * - A value >= 0 means use the index (starting at zero) of the split line based - * on `delimiter`. - * - * @param tableHandle Handle to a table which will be initialized. - * @param filename Filename of a vocabulary text file. - * @param keyIndex Column index in a line to get the table `key` values from. - * @param valueIndex Column index that represents information of a line to get the table - * `value` values from. - * @param options carries optional attributes values - * @return a new instance of InitializeTableFromTextFile + * Values may be merged in parallel, so if an index appears in both `indices[m][i]` + * and `indices[n][j]`, the result may be invalid. This differs from the normal + * DynamicStitch operator that defines the behavior in that case. + *

      + * For example: + *

      {@code
      +   *      indices[0] = 6
      +   *      indices[1] = [4, 1]
      +   *      indices[2] = [[5, 2], [0, 3]]
      +   *      data[0] = [61, 62]
      +   *      data[1] = [[41, 42], [11, 12]]
      +   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      +   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
      +   *                [51, 52], [61, 62]]
      +   *  }
      + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + *
      {@code
      +   *      # Apply function (increments x_i) on elements for which a certain condition
      +   *      # apply (x_i != -1 in this example).
      +   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      +   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
      +   *      partitioned_data = tf.dynamic_partition(
      +   *          x, tf.cast(condition_mask, tf.int32) , 2)
      +   *      partitioned_data[1] = partitioned_data[1] + 1.0
      +   *      condition_indices = tf.dynamic_partition(
      +   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      +   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      +   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      +   *      # unchanged.
      +   *  }
      + *
      + * + *
      + * + * @param data type for {@code merged()} output + * @param indices + * @param data + * @return a new instance of ParallelDynamicStitch */ - public InitializeTableFromTextFile initializeTableFromTextFile(Operand tableHandle, - Operand filename, Long keyIndex, Long valueIndex, - InitializeTableFromTextFile.Options... options) { - return InitializeTableFromTextFile.create(scope, tableHandle, filename, keyIndex, valueIndex, options); + public ParallelDynamicStitch parallelDynamicStitch( + Iterable> indices, Iterable> data) { + return ParallelDynamicStitch.create(scope, indices, data); } /** - * Adds v into specified rows of x. + * A placeholder op for a value that will be fed into the computation. *

      - * Computes y = x; y[i, :] += v; return y. + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. * - * @param data type for {@code y()} output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceAdd + * @param data type for {@code output()} output + * @param dtype The type of elements in the tensor. + * @param options carries optional attributes values + * @return a new instance of Placeholder */ - public InplaceAdd inplaceAdd(Operand x, Operand i, Operand v) { - return InplaceAdd.create(scope, x, i, v); + public Placeholder placeholder(DataType dtype, + Placeholder.Options... options) { + return Placeholder.create(scope, dtype, options); } /** - * Subtracts `v` into specified rows of `x`. + * A placeholder op that passes through `input` when its output is not fed. + * + * @param data type for {@code output()} output + * @param input The default value to produce when `output` is not fed. + * @param shape The (possibly partial) shape of the tensor. + * @return a new instance of PlaceholderWithDefault + */ + public PlaceholderWithDefault placeholderWithDefault(Operand input, + Shape shape) { + return PlaceholderWithDefault.create(scope, input, shape); + } + + /** + * Prints a string scalar. *

      - * Computes y = x; y[i, :] -= v; return y. + * Prints a string scalar to the desired output_stream. * - * @param data type for {@code y()} output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceSub + * @param input The string scalar to print. + * @param options carries optional attributes values + * @return a new instance of Print */ - public InplaceSub inplaceSub(Operand x, Operand i, Operand v) { - return InplaceSub.create(scope, x, i, v); + public Print print(Operand input, Print.Options... options) { + return Print.create(scope, input, options); } /** - * Updates specified rows with values in `v`. + * Computes the product of elements across dimensions of a tensor. *

      - * Computes `x[i, :] = v; return x`. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code y()} output - * @param x A tensor of type `T`. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceUpdate + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Prod */ - public InplaceUpdate inplaceUpdate(Operand x, Operand i, - Operand v) { - return InplaceUpdate.create(scope, x, i, v); + public Prod prod(Operand input, Operand axis, + Prod.Options... options) { + return Prod.create(scope, input, axis, options); } /** - * Checks whether a tensor has been initialized. + * Reshapes a quantized tensor as per the Reshape op. *

      - * Outputs boolean scalar indicating whether the tensor has been initialized. + * ``` * - * @param ref Should be from a `Variable` node. May be uninitialized. - * @return a new instance of IsVariableInitialized + * @param data type for {@code output()} output + * @param tensor + * @param shape Defines the shape of the output tensor. + * @param inputMin The minimum value of the input. + * @param inputMax The maximum value of the input. + * @return a new instance of QuantizedReshape */ - public IsVariableInitialized isVariableInitialized(Operand ref) { - return IsVariableInitialized.create(scope, ref); + public QuantizedReshape quantizedReshape( + Operand tensor, Operand shape, Operand inputMin, Operand inputMax) { + return QuantizedReshape.create(scope, tensor, shape, inputMin, inputMax); } /** - * Generates values in an interval. + * Creates a sequence of numbers. *

      - * A sequence of `num` evenly-spaced values are generated beginning at `start`. - * If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, - * so that the last one is exactly `stop`. + * This operation creates a sequence of numbers that begins at `start` and + * extends by increments of `delta` up to but not including `limit`. *

      * For example: *

      {@code
      -   *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
      +   *  # 'start' is 3
      +   *  # 'limit' is 18
      +   *  # 'delta' is 3
      +   *  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
          *  }
      * * @param data type for {@code output()} output - * @param start 0-D tensor. First entry in the range. - * @param stop 0-D tensor. Last entry in the range. - * @param num 0-D tensor. Number of values to generate. - * @return a new instance of LinSpace + * @param start 0-D (scalar). First entry in the sequence. + * @param limit 0-D (scalar). Upper limit of sequence, exclusive. + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @return a new instance of Range */ - public LinSpace linSpace(Operand start, - Operand stop, Operand num) { - return LinSpace.create(scope, start, stop, num); + public Range range(Operand start, Operand limit, Operand delta) { + return Range.create(scope, start, limit, delta); } /** - * Outputs all keys and values in the table. + * Returns the rank of a tensor. + *

      + * This operation returns an integer representing the rank of `input`. + *

      + * For example: + *

      {@code
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  # shape of tensor 't' is [2, 2, 3]
      +   *  rank(t) ==> 3
      +   *  }
      + * Note: The rank of a tensor is not the same as the rank of a matrix. The rank + * of a tensor is the number of indices required to uniquely select each element + * of the tensor. Rank is also known as "order", "degree", or "ndims." * - * @param data type for {@code keys()} output - * @param data type for {@code values()} output - * @param tableHandle Handle to the table. - * @param Tkeys - * @param Tvalues - * @return a new instance of LookupTableExport + * @param input + * @return a new instance of Rank */ - public LookupTableExport lookupTableExport( - Operand tableHandle, DataType Tkeys, DataType Tvalues) { - return LookupTableExport.create(scope, tableHandle, Tkeys, Tvalues); + public Rank rank(Operand input) { + return Rank.create(scope, input); } /** - * Looks up keys in a table, outputs the corresponding values. + * Reads the value of a variable. *

      - * The tensor `keys` must of the same type as the keys of the table. - * The output `values` is of the type of the table values. + * The tensor returned by this operation is immutable. *

      - * The scalar `default_value` is the value output for keys not present in the - * table. It must also be of the same type as the table values. + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. * - * @param data type for {@code values()} output - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param defaultValue - * @return a new instance of LookupTableFind + * @param data type for {@code value()} output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. + * @return a new instance of ReadVariableOp */ - public LookupTableFind lookupTableFind( - Operand tableHandle, Operand keys, Operand defaultValue) { - return LookupTableFind.create(scope, tableHandle, keys, defaultValue); + public ReadVariableOp readVariableOp(Operand resource, + DataType dtype) { + return ReadVariableOp.create(scope, resource, dtype); } /** - * Replaces the contents of the table with the specified keys and values. + * Computes the "logical and" of elements across dimensions of a tensor. *

      - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param values Values to associate with keys. - * @return a new instance of LookupTableImport + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAll */ - public LookupTableImport lookupTableImport( - Operand tableHandle, Operand keys, Operand values) { - return LookupTableImport.create(scope, tableHandle, keys, values); + public ReduceAll reduceAll(Operand input, Operand axis, + ReduceAll.Options... options) { + return ReduceAll.create(scope, input, axis, options); } /** - * Updates the table to associates keys with values. + * Computes the "logical or" of elements across dimensions of a tensor. *

      - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param values Values to associate with keys. - * @return a new instance of LookupTableInsert + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAny */ - public LookupTableInsert lookupTableInsert( - Operand tableHandle, Operand keys, Operand values) { - return LookupTableInsert.create(scope, tableHandle, keys, values); - } - - /** - * Computes the number of elements in the given table. - * - * @param tableHandle Handle to the table. - * @return a new instance of LookupTableSize - */ - public LookupTableSize lookupTableSize(Operand tableHandle) { - return LookupTableSize.create(scope, tableHandle); + public ReduceAny reduceAny(Operand input, Operand axis, + ReduceAny.Options... options) { + return ReduceAny.create(scope, input, axis, options); } /** - * Forwards the input to the output. + * Computes the maximum of elements across dimensions of a tensor. *

      - * This operator represents the loop termination condition used by the - * "pivot" switches of a loop. - * - * @param input A boolean scalar, representing the branch predicate of the Switch op. - * @return a new instance of LoopCond - */ - public LoopCond loopCond(Operand input) { - return LoopCond.create(scope, input); - } - - /** - * Op removes all elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapClear - */ - public MapClear mapClear(List> dtypes, MapClear.Options... options) { - return MapClear.create(scope, dtypes, options); - } - - /** - * Op returns the number of incomplete elements in the underlying container. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapIncompleteSize + * @return a new instance of ReduceMax */ - public MapIncompleteSize mapIncompleteSize(List> dtypes, - MapIncompleteSize.Options... options) { - return MapIncompleteSize.create(scope, dtypes, options); + public ReduceMax reduceMax(Operand input, + Operand axis, ReduceMax.Options... options) { + return ReduceMax.create(scope, input, axis, options); } /** - * Op peeks at the values at the specified key. If the + * Computes the minimum of elements across dimensions of a tensor. *

      - * underlying container does not contain this key - * this op will block until it does. - * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapPeek - */ - public MapPeek mapPeek(Operand key, Operand indices, List> dtypes, - MapPeek.Options... options) { - return MapPeek.create(scope, key, indices, dtypes, options); - } - - /** - * Op returns the number of elements in the underlying container. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapSize + * @return a new instance of ReduceMin */ - public MapSize mapSize(List> dtypes, MapSize.Options... options) { - return MapSize.create(scope, dtypes, options); + public ReduceMin reduceMin(Operand input, + Operand axis, ReduceMin.Options... options) { + return ReduceMin.create(scope, input, axis, options); } /** - * Stage (key, values) in the underlying container which behaves like a hashtable. + * Computes the product of elements across dimensions of a tensor. + *

      + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param key int64 - * @param indices - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapStage + * @return a new instance of ReduceProd */ - public MapStage mapStage(Operand key, Operand indices, - Iterable> values, List> dtypes, MapStage.Options... options) { - return MapStage.create(scope, key, indices, values, dtypes, options); + public ReduceProd reduceProd(Operand input, + Operand axis, ReduceProd.Options... options) { + return ReduceProd.create(scope, input, axis, options); } /** - * Op removes and returns the values associated with the key + * Computes the sum of elements across dimensions of a tensor. *

      - * from the underlying container. If the underlying container - * does not contain this key, the op will block until it does. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param key - * @param indices - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapUnstage + * @return a new instance of ReduceSum */ - public MapUnstage mapUnstage(Operand key, Operand indices, - List> dtypes, MapUnstage.Options... options) { - return MapUnstage.create(scope, key, indices, dtypes, options); + public ReduceSum reduceSum(Operand input, + Operand axis, ReduceSum.Options... options) { + return ReduceSum.create(scope, input, axis, options); } /** - * Op removes and returns a random (key, value) - *

      - * from the underlying container. If the underlying container - * does not contain elements, the op will block until it does. + * Makes its input available to the next iteration. * - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapUnstageNoKey + * @param data type for {@code output()} output + * @param data The tensor to be made available to the next iteration. + * @return a new instance of RefNextIteration */ - public MapUnstageNoKey mapUnstageNoKey(Operand indices, List> dtypes, - MapUnstageNoKey.Options... options) { - return MapUnstageNoKey.create(scope, indices, dtypes, options); + public RefNextIteration refNextIteration(Operand data) { + return RefNextIteration.create(scope, data); } /** - * Computes the maximum of elements across dimensions of a tensor. - *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * Forwards the `index`th element of `inputs` to `output`. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Max + * @param index A scalar that determines the input that gets selected. + * @param inputs A list of ref tensors, one of which will be forwarded to `output`. + * @return a new instance of RefSelect */ - public Max max(Operand input, Operand axis, - Max.Options... options) { - return Max.create(scope, input, axis, options); + public RefSelect refSelect(Operand index, + Iterable> inputs) { + return RefSelect.create(scope, index, inputs); } /** - * Forwards the value of an available tensor from `inputs` to `output`. + * Forwards the ref tensor `data` to the output port determined by `pred`. *

      - * `Merge` waits for at least one of the tensors in `inputs` to become available. - * It is usually combined with `Switch` to implement branching. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. *

      - * `Merge` forwards the first tensor to become available to `output`, and sets - * `value_index` to its index in `inputs`. + * See also `Switch` and `Merge`. * - * @param data type for {@code output()} output - * @param inputs The input tensors, exactly one of which will become available. - * @return a new instance of Merge + * @param data type for {@code outputFalse()} output + * @param data The ref tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of RefSwitch */ - public Merge merge(Iterable> inputs) { - return Merge.create(scope, inputs); + public RefSwitch refSwitch(Operand data, Operand pred) { + return RefSwitch.create(scope, data, pred); } /** - * Computes the minimum of elements across dimensions of a tensor. + * Execute a sub graph on a remote processor. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * The graph specifications(such as graph itself, input tensors and output names) + * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + * as serialized_remote_fused_graph_execute_info. + * The specifications will be passed to a dedicated registered + * remote fused graph executor. The executor will send the graph specifications + * to a remote processor and execute that graph. The execution results + * will be passed to consumer nodes as outputs of this node. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Min + * @param inputs Arbitrary number of tensors with arbitrary data types + * @param Toutputs + * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer + * of RemoteFusedGraphExecuteInfo which contains graph specifications. + * @return a new instance of RemoteFusedGraphExecute */ - public Min min(Operand input, Operand axis, - Min.Options... options) { - return Min.create(scope, input, axis, options); + public RemoteFusedGraphExecute remoteFusedGraphExecute(Iterable> inputs, + List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { + return RemoteFusedGraphExecute.create(scope, inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo); } /** - * Pads a tensor with mirrored values. + * Reshapes a tensor. *

      - * This operation pads a `input` with mirrored values according to the `paddings` - * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many values to add before the contents of `input` in that dimension, and - * `paddings[D, 1]` indicates how many values to add after the contents of `input` - * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater - * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true - * (if false, respectively). + * Given `tensor`, this operation returns a tensor that has the same values + * as `tensor` with shape `shape`. *

      - * The padded size of each dimension D of the output is: + * If one component of 1-D tensor `shape` is the special value -1, the size of that + * dimension is computed so that the total size remains constant. In particular, a + * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + * unknown. *

      - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * The `shape` must be 1-D and the operation returns a tensor with shape + * `shape` filled with the values of `tensor`. In this case, the number of elements + * implied by `shape` must be the same as the number of elements in `tensor`. + *

      + * It is an error if `shape` is not 1-D. *

      * For example: *

      {@code
      -   *  # 't' is [[1, 2, 3], [4, 5, 6]].
      -   *  # 'paddings' is [[1, 1]], [2, 2]].
      -   *  # 'mode' is SYMMETRIC.
      -   *  # rank of 't' is 2.
      -   *  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
      -   *                        [2, 1, 1, 2, 3, 3, 2]
      -   *                        [5, 4, 4, 5, 6, 6, 5]
      -   *                        [5, 4, 4, 5, 6, 6, 5]]
      +   *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
      +   *  # tensor 't' has shape [9]
      +   *  reshape(t, [3, 3]) ==> [[1, 2, 3],
      +   *                          [4, 5, 6],
      +   *                          [7, 8, 9]]
      +   *
      +   *  # tensor 't' is [[[1, 1], [2, 2]],
      +   *  #                [[3, 3], [4, 4]]]
      +   *  # tensor 't' has shape [2, 2, 2]
      +   *  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
      +   *                          [3, 3, 4, 4]]
      +   *
      +   *  # tensor 't' is [[[1, 1, 1],
      +   *  #                 [2, 2, 2]],
      +   *  #                [[3, 3, 3],
      +   *  #                 [4, 4, 4]],
      +   *  #                [[5, 5, 5],
      +   *  #                 [6, 6, 6]]]
      +   *  # tensor 't' has shape [3, 2, 3]
      +   *  # pass '[-1]' to flatten 't'
      +   *  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
      +   *
      +   *  # -1 can also be used to infer the shape
      +   *
      +   *  # -1 is inferred to be 9:
      +   *  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      +   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      +   *  # -1 is inferred to be 2:
      +   *  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      +   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      +   *  # -1 is inferred to be 3:
      +   *  reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
      +   *                                [2, 2, 2],
      +   *                                [3, 3, 3]],
      +   *                               [[4, 4, 4],
      +   *                                [5, 5, 5],
      +   *                                [6, 6, 6]]]
      +   *
      +   *  # tensor 't' is [7]
      +   *  # shape `[]` reshapes to a scalar
      +   *  reshape(t, []) ==> 7
          *  }
      * * @param data type for {@code output()} output - * @param input The input tensor to be padded. - * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. - * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions - * do not include the borders, while in symmetric mode the padded regions - * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` - * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and - * it is `[1, 2, 3, 3, 2]` in symmetric mode. - * @return a new instance of MirrorPad + * @param tensor + * @param shape Defines the shape of the output tensor. + * @return a new instance of Reshape */ - public MirrorPad mirrorPad(Operand input, - Operand paddings, String mode) { - return MirrorPad.create(scope, input, paddings, mode); + public Reshape reshape(Operand tensor, + Operand shape) { + return Reshape.create(scope, tensor, shape); } /** - * Wraps an arbitrary MLIR computation expressed as a module with a main() function. - *

      - * This operation does not have an associated kernel and is not intended to be - * executed in a regular TensorFlow session. Instead it is intended to be used for - * testing or for special case where a user intends to pass custom MLIR computation - * through a TensorFlow graph with the intent of having custom tooling processing - * it downstream (when targeting a different environment, like TensorFlow lite for - * example). - * The MLIR module is expected to have a main() function that will be used as an - * entry point. The inputs to the operations will be passed as argument to the - * main() function and the returned values of the main function mapped to the - * outputs. - * Example usage: - *

      {@code
      -   *  import tensorflow as tf
      -   *  from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
      -   *
      -   *  mlir_module = '''python
      -   *  func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
      -   *     %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
      -   *     return %ret : tensor<10x10xf32>
      -   *  }
      -   *  '''
      -   *
      -   * @tf.function def foo(x, y):
      -   *    return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
      +   * Increments variable pointed to by 'resource' until it reaches 'limit'.
          *
      -   *  graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
      -   *  }
      - * @param inputs - * @param mlirModule - * @param Toutputs - * @return a new instance of MlirPassthroughOp + * @param data type for {@code output()} output + * @param resource Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T + * @return a new instance of ResourceCountUpTo */ - public MlirPassthroughOp mlirPassthroughOp(Iterable> inputs, String mlirModule, - List> Toutputs) { - return MlirPassthroughOp.create(scope, inputs, mlirModule, Toutputs); + public ResourceCountUpTo resourceCountUpTo(Operand resource, Long limit, + DataType T) { + return ResourceCountUpTo.create(scope, resource, limit, T); } /** - * Creates an empty hash table that uses tensors as the backing store. - *

      - * It uses "open addressing" with quadratic reprobing to resolve - * collisions. + * Gather slices from the variable pointed to by `resource` according to `indices`. *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + *

      {@code
      +   *      # Scalar indices
      +   *      output[:, ..., :] = params[indices, :, ... :]
          *
      -   * @param emptyKey The key used to represent empty key buckets internally. Must not
      -   *  be used in insert or lookup operations.
      -   * @param deletedKey
      -   * @param valueDtype Type of the table values.
      +   *      # Vector indices
      +   *      output[i, :, ..., :] = params[indices[i], :, ... :]
      +   *
      +   *      # Higher rank indices
      +   *      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
      +   *  }
      + * + * @param data type for {@code output()} output + * @param resource + * @param indices + * @param dtype * @param options carries optional attributes values - * @return a new instance of MutableDenseHashTable + * @return a new instance of ResourceGather */ - public MutableDenseHashTable mutableDenseHashTable( - Operand emptyKey, Operand deletedKey, DataType valueDtype, - MutableDenseHashTable.Options... options) { - return MutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); + public ResourceGather resourceGather(Operand resource, + Operand indices, DataType dtype, ResourceGather.Options... options) { + return ResourceGather.create(scope, resource, indices, dtype, options); } /** - * Creates an empty hash table. - *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. - * @param options carries optional attributes values - * @return a new instance of MutableHashTable + * @param data type for {@code output()} output + * @param resource + * @param indices + * @param dtype + * @return a new instance of ResourceGatherNd */ - public MutableHashTable mutableHashTable(DataType keyDtype, - DataType valueDtype, MutableHashTable.Options... options) { - return MutableHashTable.create(scope, keyDtype, valueDtype, options); + public ResourceGatherNd resourceGatherNd( + Operand resource, Operand indices, DataType dtype) { + return ResourceGatherNd.create(scope, resource, indices, dtype); } /** - * Creates an empty hash table. + * Adds sparse updates to the variable referenced by `resource`. *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a vector. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] += updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. - * @param options carries optional attributes values - * @return a new instance of MutableHashTableOfTensors + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterAdd */ - public MutableHashTableOfTensors mutableHashTableOfTensors( - DataType keyDtype, DataType valueDtype, MutableHashTableOfTensors.Options... options) { - return MutableHashTableOfTensors.create(scope, keyDtype, valueDtype, options); + public ResourceScatterAdd resourceScatterAdd( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterAdd.create(scope, resource, indices, updates); } /** - * Creates a Mutex resource that can be locked by `MutexLock`. + * Divides sparse updates into the variable referenced by `resource`. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] /= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param options carries optional attributes values - * @return a new instance of Mutex + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterDiv */ - public Mutex mutex(Mutex.Options... options) { - return Mutex.create(scope, options); + public ResourceScatterDiv resourceScatterDiv( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterDiv.create(scope, resource, indices, updates); } /** - * Locks a mutex resource. The output is the lock. So long as the lock tensor + * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. *

      - * is alive, any other request to use `MutexLock` with this mutex will wait. + * This operation computes *

      - * This is particularly useful for creating a critical section when used in - * conjunction with `MutexLockIdentity`: - *

      {@code
      -   *  mutex = mutex_v2(
      -   *    shared_name=handle_name, container=container, name=name)
      -   *
      -   *  def execute_in_critical_section(fn, *args, **kwargs):
      -   *    lock = gen_resource_variable_ops.mutex_lock(mutex)
      -   *
      -   *    with ops.control_dependencies([lock]):
      -   *      r = fn(*args, **kwargs)
      -   *
      -   *    with ops.control_dependencies(nest.flatten(r)):
      -   *      with ops.colocate_with(mutex):
      -   *        ensure_lock_exists = mutex_lock_identity(lock)
      -   *
      -   *      # Make sure that if any element of r is accessed, all of
      -   *      # them are executed together.
      -   *      r = nest.map_structure(tf.identity, r)
      -   *
      -   *    with ops.control_dependencies([ensure_lock_exists]):
      -   *      return nest.map_structure(tf.identity, r)
      -   *  }
      - * While `fn` is running in the critical section, no other functions which wish to - * use this critical section may run. + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) *

      - * Often the use case is that two executions of the same graph, in parallel, - * wish to run `fn`; and we wish to ensure that only one of them executes - * at a time. This is especially important if `fn` modifies one or more - * variables at a time. + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) *

      - * It is also useful if two separate functions must share a resource, but we - * wish to ensure the usage is exclusive. + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param mutex The mutex resource to lock. - * @return a new instance of MutexLock + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMax */ - public MutexLock mutexLock(Operand mutex) { - return MutexLock.create(scope, mutex); + public ResourceScatterMax resourceScatterMax( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMax.create(scope, resource, indices, updates); } /** - * Makes its input available to the next iteration. + * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param data The tensor to be made available to the next iteration. - * @return a new instance of NextIteration + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMin */ - public NextIteration nextIteration(Operand data) { - return NextIteration.create(scope, data); + public ResourceScatterMin resourceScatterMin( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMin.create(scope, resource, indices, updates); } /** - * Does nothing. Only useful as a placeholder for control edges. + * Multiplies sparse updates into the variable referenced by `resource`. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] *= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @return a new instance of NoOp + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMul */ - public NoOp noOp() { - return NoOp.create(scope); + public ResourceScatterMul resourceScatterMul( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMul.create(scope, resource, indices, updates); } /** - * Returns a one-hot tensor. + * Applies sparse addition to individual values or slices in a Variable. *

      - * The locations represented by indices in `indices` take value `on_value`, - * while all other locations take value `off_value`. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. *

      - * If the input `indices` is rank `N`, the output will have rank `N+1`, - * The new axis is created at dimension `axis` (default: the new axis is - * appended at the end). + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. *

      - * If `indices` is a scalar the output shape will be a vector of length `depth`. + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. *

      - * If `indices` is a vector of length `features`, the output shape will be: + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      {@code
      -   *    features x depth if axis == -1
      -   *    depth x features if axis == 0
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
          *  }
      - * If `indices` is a matrix (batch) with shape `[batch, features]`, - * the output shape will be: + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: *
      {@code
      -   *    batch x features x depth if axis == -1
      -   *    batch x depth x features if axis == 1
      -   *    depth x batch x features if axis == 0
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  add = tf.scatter_nd_add(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(add)
          *  }
      - * Examples - * ========= + * The resulting update to ref would look like this: *

      - * Suppose that - *

      {@code
      -   *    indices = [0, 2, -1, 1]
      -   *    depth = 3
      -   *    on_value = 5.0
      -   *    off_value = 0.0
      -   *    axis = -1
      -   *  }
      - * Then output is `[4 x 3]`: - *
      {@code
      -   *  output =
      -   *    [5.0 0.0 0.0]  // one_hot(0)
      -   *    [0.0 0.0 5.0]  // one_hot(2)
      -   *    [0.0 0.0 0.0]  // one_hot(-1)
      -   *    [0.0 5.0 0.0]  // one_hot(1)
      -   *  }
      - * Suppose that - *
      {@code
      -   *    indices = [0, 2, -1, 1]
      -   *    depth = 3
      -   *    on_value = 0.0
      -   *    off_value = 3.0
      -   *    axis = 0
      -   *  }
      - * Then output is `[3 x 4]`: - *
      {@code
      -   *  output =
      -   *    [0.0 3.0 3.0 3.0]
      -   *    [3.0 3.0 3.0 0.0]
      -   *    [3.0 3.0 3.0 3.0]
      -   *    [3.0 0.0 3.0 3.0]
      -   *  //  ^                one_hot(0)
      -   *  //      ^            one_hot(2)
      -   *  //          ^        one_hot(-1)
      -   *  //              ^    one_hot(1)
      -   *  }
      - * Suppose that + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdAdd + */ + public ResourceScatterNdAdd resourceScatterNdAdd( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdAdd.Options... options) { + return ResourceScatterNdAdd.create(scope, ref, indices, updates, options); + } + + /** + * Applies sparse subtraction to individual values or slices in a Variable. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      {@code
      -   *    indices = [[0, 2], [1, -1]]
      -   *    depth = 3
      -   *    on_value = 1.0
      -   *    off_value = 0.0
      -   *    axis = -1
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
          *  }
      - * Then output is `[2 x 2 x 3]`: + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: *
      {@code
      -   *  output =
      -   *    [
      -   *      [1.0, 0.0, 0.0]  // one_hot(0)
      -   *      [0.0, 0.0, 1.0]  // one_hot(2)
      -   *    ][
      -   *      [0.0, 1.0, 0.0]  // one_hot(1)
      -   *      [0.0, 0.0, 0.0]  // one_hot(-1)
      -   *    ]
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(sub)
          *  }
      + * The resulting update to ref would look like this: + *

      + * [1, -9, 3, -6, -4, 6, 7, -4] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param indices A tensor of indices. - * @param depth A scalar defining the depth of the one hot dimension. - * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. - * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. * @param options carries optional attributes values - * @return a new instance of OneHot + * @return a new instance of ResourceScatterNdSub */ - public OneHot oneHot(Operand indices, - Operand depth, Operand onValue, Operand offValue, OneHot.Options... options) { - return OneHot.create(scope, indices, depth, onValue, offValue, options); + public ResourceScatterNdSub resourceScatterNdSub( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdSub.Options... options) { + return ResourceScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Returns a tensor of ones with the same shape and type as x. - * - * @param data type for {@code y()} output - * @param x a tensor of type T. - * @return a new instance of OnesLike - */ - public OnesLike onesLike(Operand x) { - return OnesLike.create(scope, x); - } - - /** - * Op removes all elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapClear - */ - public OrderedMapClear orderedMapClear(List> dtypes, - OrderedMapClear.Options... options) { - return OrderedMapClear.create(scope, dtypes, options); - } - - /** - * Op returns the number of incomplete elements in the underlying container. 
- * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapIncompleteSize - */ - public OrderedMapIncompleteSize orderedMapIncompleteSize(List> dtypes, - OrderedMapIncompleteSize.Options... options) { - return OrderedMapIncompleteSize.create(scope, dtypes, options); - } - - /** - * Op peeks at the values at the specified key. If the + * Applies sparse `updates` to individual values or slices within a given *

      - * underlying container does not contain this key - * this op will block until it does. This Op is optimized for - * performance. - * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapPeek - */ - public OrderedMapPeek orderedMapPeek(Operand key, Operand indices, - List> dtypes, OrderedMapPeek.Options... options) { - return OrderedMapPeek.create(scope, key, indices, dtypes, options); - } - - /** - * Op returns the number of elements in the underlying container. + * variable according to `indices`. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
      +   *  }
      + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + *
      {@code
      +   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *      indices = tf.constant([[4], [3], [1] ,[7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      update = tf.scatter_nd_update(ref, indices, updates)
      +   *      with tf.Session() as sess:
      +   *        print sess.run(update)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 11, 3, 10, 9, 6, 7, 12] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param dtypes + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. * @param options carries optional attributes values - * @return a new instance of OrderedMapSize + * @return a new instance of ResourceScatterNdUpdate */ - public OrderedMapSize orderedMapSize(List> dtypes, - OrderedMapSize.Options... options) { - return OrderedMapSize.create(scope, dtypes, options); + public ResourceScatterNdUpdate resourceScatterNdUpdate( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdUpdate.Options... options) { + return ResourceScatterNdUpdate.create(scope, ref, indices, updates, options); } /** - * Stage (key, values) in the underlying container which behaves like a ordered + * Subtracts sparse updates from the variable referenced by `resource`. *

      - * associative container. Elements are ordered by key. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] -= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param key int64 - * @param indices - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapStage + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterSub */ - public OrderedMapStage orderedMapStage(Operand key, Operand indices, - Iterable> values, List> dtypes, OrderedMapStage.Options... options) { - return OrderedMapStage.create(scope, key, indices, values, dtypes, options); + public ResourceScatterSub resourceScatterSub( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterSub.create(scope, resource, indices, updates); } /** - * Op removes and returns the values associated with the key + * Assigns sparse updates to the variable referenced by `resource`. *

      - * from the underlying container. If the underlying container - * does not contain this key, the op will block until it does. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapUnstage + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterUpdate */ - public OrderedMapUnstage orderedMapUnstage(Operand key, Operand indices, - List> dtypes, OrderedMapUnstage.Options... options) { - return OrderedMapUnstage.create(scope, key, indices, dtypes, options); + public ResourceScatterUpdate resourceScatterUpdate( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterUpdate.create(scope, resource, indices, updates); } /** - * Op removes and returns the (key, value) element with the smallest + * Assign `value` to the sliced l-value reference of `ref`. *

      - * key from the underlying container. If the underlying container - * does not contain elements, the op will block until it does. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + *

      + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * - * @param indices - * @param dtypes + * @param ref + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of OrderedMapUnstageNoKey + * @return a new instance of ResourceStridedSliceAssign */ - public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, - List> dtypes, OrderedMapUnstageNoKey.Options... options) { - return OrderedMapUnstageNoKey.create(scope, indices, dtypes, options); + public ResourceStridedSliceAssign resourceStridedSliceAssign( + Operand ref, Operand begin, Operand end, Operand strides, Operand value, + ResourceStridedSliceAssign.Options... options) { + return ResourceStridedSliceAssign.create(scope, ref, begin, end, strides, value, options); } /** - * Pads a tensor. + * Reverses specific dimensions of a tensor. *

      - * This operation pads `input` according to the `paddings` and `constant_values` - * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many padding values to add before the contents of `input` in that dimension, - * and `paddings[D, 1]` indicates how many padding values to add after the contents - * of `input` in that dimension. `constant_values` is a scalar tensor of the same - * type as `input` that indicates the value to use for padding `input`. + * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. *

      - * The padded size of each dimension D of the output is: + * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * dimensions of `tensor` to reverse. This operation reverses each dimension + * `i` for which there exists `j` s.t. `axis[j] == i`. *

      - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * `tensor` can have up to 8 dimensions. The number of dimensions specified + * in `axis` may be 0 or more entries. If an index is specified more than + * once, a InvalidArgument error is raised. *

      * For example: *

      {@code
      -   *  # 't' is [[1, 1], [2, 2]]
      -   *  # 'paddings' is [[1, 1], [2, 2]]
      -   *  # 'constant_values' is 0
      -   *  # rank of 't' is 2
      -   *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
      -   *                        [0, 0, 1, 1, 0, 0]
      -   *                        [0, 0, 2, 2, 0, 0]
      -   *                        [0, 0, 0, 0, 0, 0]]
      +   *  # tensor 't' is [[[[ 0,  1,  2,  3],
      +   *  #                  [ 4,  5,  6,  7],
      +   *  #                  [ 8,  9, 10, 11]],
      +   *  #                 [[12, 13, 14, 15],
      +   *  #                  [16, 17, 18, 19],
      +   *  #                  [20, 21, 22, 23]]]]
      +   *  # tensor 't' shape is [1, 2, 3, 4]
      +   *
      +   *  # 'dims' is [3] or 'dims' is [-1]
      +   *  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
      +   *                          [ 7,  6,  5,  4],
      +   *                          [ 11, 10, 9, 8]],
      +   *                         [[15, 14, 13, 12],
      +   *                          [19, 18, 17, 16],
      +   *                          [23, 22, 21, 20]]]]
      +   *
      +   *  # 'dims' is '[1]' (or 'dims' is '[-3]')
      +   *  reverse(t, dims) ==> [[[[12, 13, 14, 15],
      +   *                          [16, 17, 18, 19],
      +   *                          [20, 21, 22, 23]
      +   *                         [[ 0,  1,  2,  3],
      +   *                          [ 4,  5,  6,  7],
      +   *                          [ 8,  9, 10, 11]]]]
      +   *
      +   *  # 'dims' is '[2]' (or 'dims' is '[-2]')
      +   *  reverse(t, dims) ==> [[[[8, 9, 10, 11],
      +   *                          [4, 5, 6, 7],
      +   *                          [0, 1, 2, 3]]
      +   *                         [[20, 21, 22, 23],
      +   *                          [16, 17, 18, 19],
      +   *                          [12, 13, 14, 15]]]]
          *  }
      * * @param data type for {@code output()} output - * @param input - * @param paddings - * @param constantValues - * @return a new instance of Pad - */ - public Pad pad(Operand input, Operand paddings, - Operand constantValues) { - return Pad.create(scope, input, paddings, constantValues); + * @param tensor Up to 8-D. + * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range + * `[-rank(tensor), rank(tensor))`. + * @return a new instance of Reverse + */ + public Reverse reverse(Operand tensor, + Operand axis) { + return Reverse.create(scope, tensor, axis); } /** - * Concatenates a list of `N` tensors along the first dimension. + * Reverses variable length slices. *

      - * The input tensors are all required to have size 1 in the first dimension. + * This op first slices `input` along the dimension `batch_dim`, and for each + * slice `i`, reverses the first `seq_lengths[i]` elements along + * the dimension `seq_dim`. + *

      + * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + *

      + * The output slice `i` along dimension `batch_dim` is then given by input + * slice `i`, with the first `seq_lengths[i]` slices along dimension + * `seq_dim` reversed. *

      * For example: *

      {@code
      -   *  # 'x' is [[1, 4]]
      -   *  # 'y' is [[2, 5]]
      -   *  # 'z' is [[3, 6]]
      -   *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
      +   *  # Given this:
      +   *  batch_dim = 0
      +   *  seq_dim = 1
      +   *  input.dims = (4, 8, ...)
      +   *  seq_lengths = [7, 2, 3, 5]
      +   *
      +   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      +   *  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
      +   *  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
      +   *  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
      +   *  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
      +   *
      +   *  # while entries past seq_lens are copied through:
      +   *  output[0, 7:, :, ...] = input[0, 7:, :, ...]
      +   *  output[1, 2:, :, ...] = input[1, 2:, :, ...]
      +   *  output[2, 3:, :, ...] = input[2, 3:, :, ...]
      +   *  output[3, 2:, :, ...] = input[3, 2:, :, ...]
      +   *  }
      + * In contrast, if: + *
      {@code
      +   *  # Given this:
      +   *  batch_dim = 2
      +   *  seq_dim = 0
      +   *  input.dims = (8, ?, 4, ...)
      +   *  seq_lengths = [7, 2, 3, 5]
      +   *
      +   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      +   *  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
      +   *  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
      +   *  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
      +   *  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
      +   *
      +   *  # while entries past seq_lens are copied through:
      +   *  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
      +   *  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
      +   *  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
      +   *  output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
          *  }
      - * The difference between concat and parallel_concat is that concat requires all - * of the inputs be computed before the operation will begin but doesn't require - * that the input shapes be known during graph construction. Parallel concat - * will copy pieces of the input into the output as they become available, in - * some situations this can provide a performance benefit. * * @param data type for {@code output()} output - * @param values Tensors to be concatenated. All must have size 1 in the first dimension - * and same shape. - * @param shape the final shape of the result; should be equal to the shapes of any input - * but with the number of input values in the first dimension. - * @return a new instance of ParallelConcat + * @param input The input to reverse. + * @param seqLengths 1-D with length `input.dims(batch_dim)` and + * `max(seq_lengths) <= input.dims(seq_dim)` + * @param seqDim The dimension which is partially reversed. + * @param options carries optional attributes values + * @return a new instance of ReverseSequence */ - public ParallelConcat parallelConcat(Iterable> values, - Shape shape) { - return ParallelConcat.create(scope, values, shape); + public ReverseSequence reverseSequence(Operand input, + Operand seqLengths, Long seqDim, ReverseSequence.Options... options) { + return ReverseSequence.create(scope, input, seqLengths, seqDim, options); } /** - * Interleave the values from the `data` tensors into a single tensor. + * Rolls the elements of a tensor along an axis. *

      - * Builds a merged tensor such that - *

      {@code
      -   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
      -   *  }
      - * For example, if each `indices[m]` is scalar or vector, we have + * The elements are shifted positively (towards larger indices) by the offset of + * `shift` along the dimension of `axis`. Negative `shift` values will shift + * elements in the opposite direction. Elements that roll passed the last position + * will wrap around to the first and vice versa. Multiple shifts along multiple + * axes may be specified. + *

      + * For example: *

      {@code
      -   *      # Scalar indices:
      -   *      merged[indices[m], ...] = data[m][...]
      +   *  # 't' is [0, 1, 2, 3, 4]
      +   *  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
          *
      -   *      # Vector indices:
      -   *      merged[indices[m][i], ...] = data[m][i, ...]
      +   *  # shifting along multiple dimensions
      +   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      +   *  roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
      +   *
      +   *  # shifting along the same axis multiple times
      +   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      +   *  roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
          *  }
      - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. In terms of this - * `constant`, the output shape is + * + * @param data type for {@code output()} output + * @param input + * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which + * elements are shifted positively (towards larger indices) along the dimension + * specified by `axis[i]`. Negative shifts will roll the elements in the opposite + * direction. + * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift + * `shift[i]` should occur. If the same axis is referenced more than once, the + * total shift for that axis will be the sum of all the shifts that belong to that + * axis. + * @return a new instance of Roll + */ + public Roll roll(Operand input, + Operand shift, Operand axis) { + return Roll.create(scope, input, shift, axis); + } + + /** + * Perform batches of RPC requests. *

      - * merged.shape = [max(indices)] + constant + * This op asynchronously performs either a single RPC request, or a batch + * of requests. RPC requests are defined by three main parameters: *

      - * Values may be merged in parallel, so if an index appears in both `indices[m][i]` - * and `indices[n][j]`, the result may be invalid. This differs from the normal - * DynamicStitch operator that defines the behavior in that case. + * - `address` (the host+port or BNS address of the request) + * - `method` (the RPC method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). *

      - * For example: + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: *

      {@code
      -   *      indices[0] = 6
      -   *      indices[1] = [4, 1]
      -   *      indices[2] = [[5, 2], [0, 3]]
      -   *      data[0] = [61, 62]
      -   *      data[1] = [[41, 42], [11, 12]]
      -   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      -   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
      -   *                [51, 52], [61, 62]]
      +   *  service MyService {
      +   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
      +   *    }
      +   *  };
          *  }
      - * This method can be used to merge partitions created by `dynamic_partition` - * as illustrated on the following example: + * then call this op with arguments: *
      {@code
      -   *      # Apply function (increments x_i) on elements for which a certain condition
      -   *      # apply (x_i != -1 in this example).
      -   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      -   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
      -   *      partitioned_data = tf.dynamic_partition(
      -   *          x, tf.cast(condition_mask, tf.int32) , 2)
      -   *      partitioned_data[1] = partitioned_data[1] + 1.0
      -   *      condition_indices = tf.dynamic_partition(
      -   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      -   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      -   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      -   *      # unchanged.
      +   *  address = "localhost:2345"
      +   *  method = "MyService/MyMethod"
          *  }
      - *
      - * - *
      - * - * @param data type for {@code merged()} output - * @param indices - * @param data - * @return a new instance of ParallelDynamicStitch - */ - public ParallelDynamicStitch parallelDynamicStitch( - Iterable> indices, Iterable> data) { - return ParallelDynamicStitch.create(scope, indices, data); - } - - /** - * A placeholder op for a value that will be fed into the computation. + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. *

      - * N.B. This operation will fail with an error if it is executed. It is - * intended as a way to represent a value that will always be fed, and to - * provide attrs that enable the fed value to be checked at runtime. + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + *

      + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + *

      + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + *

      + * If the connection fails or the remote worker returns an error + * status, the op reraises this exception locally. + *

      + * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. * - * @param data type for {@code output()} output - * @param dtype The type of elements in the tensor. + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. * @param options carries optional attributes values - * @return a new instance of Placeholder - */ - public Placeholder placeholder(DataType dtype, - Placeholder.Options... options) { - return Placeholder.create(scope, dtype, options); - } - - /** - * A placeholder op that passes through `input` when its output is not fed. - * - * @param data type for {@code output()} output - * @param input The default value to produce when `output` is not fed. - * @param shape The (possibly partial) shape of the tensor. - * @return a new instance of PlaceholderWithDefault + * @return a new instance of Rpc */ - public PlaceholderWithDefault placeholderWithDefault(Operand input, - Shape shape) { - return PlaceholderWithDefault.create(scope, input, shape); + public Rpc rpc(Operand address, Operand method, Operand request, + Rpc.Options... options) { + return Rpc.create(scope, address, method, request, options); } /** - * Prints a string scalar. + * Adds sparse updates to a variable reference. *

      - * Prints a string scalar to the desired output_stream. - * - * @param input The string scalar to print. - * @param options carries optional attributes values - * @return a new instance of Print - */ - public Print print(Operand input, Print.Options... options) { - return Print.create(scope, input, options); - } - - /** - * Computes the product of elements across dimensions of a tensor. + * This operation computes *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * # Scalar indices + * ref[indices, ...] += updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @param options carries optional attributes values - * @return a new instance of Prod + * @return a new instance of ScatterAdd */ - public Prod prod(Operand input, Operand axis, - Prod.Options... options) { - return Prod.create(scope, input, axis, options); + public ScatterAdd scatterAdd(Operand ref, + Operand indices, Operand updates, ScatterAdd.Options... options) { + return ScatterAdd.create(scope, ref, indices, updates, options); } /** - * Reshapes a quantized tensor as per the Reshape op. + * Divides a variable reference by sparse updates. *

      - * ``` + * This operation computes + *

      {@code
      +   *      # Scalar indices
      +   *      ref[indices, ...] /= updates[...]
          *
      -   * @param  data type for {@code output()} output
      -   * @param tensor
      -   * @param shape Defines the shape of the output tensor.
      -   * @param inputMin The minimum value of the input.
      -   * @param inputMax The maximum value of the input.
      -   * @return a new instance of QuantizedReshape
      -   */
      -  public  QuantizedReshape quantizedReshape(
      -      Operand tensor, Operand shape, Operand inputMin, Operand inputMax) {
      -    return QuantizedReshape.create(scope, tensor, shape, inputMin, inputMax);
      -  }
      -
      -  /**
      -   * Creates a sequence of numbers.
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] /= updates[i, ...]
      +   *
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
      +   *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. *

      - * This operation creates a sequence of numbers that begins at `start` and - * extends by increments of `delta` up to but not including `limit`. + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions divide. *

      - * For example: - *

      {@code
      -   *  # 'start' is 3
      -   *  # 'limit' is 18
      -   *  # 'delta' is 3
      -   *  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
      -   *  }
      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. * - * @param data type for {@code output()} output - * @param start 0-D (scalar). First entry in the sequence. - * @param limit 0-D (scalar). Upper limit of sequence, exclusive. - * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. - * @return a new instance of Range + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of values that `ref` is divided by. + * @param options carries optional attributes values + * @return a new instance of ScatterDiv */ - public Range range(Operand start, Operand limit, Operand delta) { - return Range.create(scope, start, limit, delta); + public ScatterDiv scatterDiv(Operand ref, + Operand indices, Operand updates, ScatterDiv.Options... options) { + return ScatterDiv.create(scope, ref, indices, updates, options); } /** - * Returns the rank of a tensor. + * Reduces sparse updates into a variable reference using the `max` operation. *

      - * This operation returns an integer representing the rank of `input`. + * This operation computes *

      - * For example: - *

      {@code
      -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      -   *  # shape of tensor 't' is [2, 2, 3]
      -   *  rank(t) ==> 3
      -   *  }
      - * Note: The rank of a tensor is not the same as the rank of a matrix. The rank - * of a tensor is the number of indices required to uniquely select each element - * of the tensor. Rank is also known as "order", "degree", or "ndims." - * - * @param input - * @return a new instance of Rank - */ - public Rank rank(Operand input) { - return Rank.create(scope, input); - } - - /** - * Reads the value of a variable. + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) *

      - * The tensor returned by this operation is immutable. + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) *

      - * The value returned by this operation is guaranteed to be influenced by all the - * writes on which this operation depends directly or indirectly, and to not be - * influenced by any of the writes which depend directly or indirectly on this - * operation. + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code value()} output - * @param resource handle to the resource in which to store the variable. - * @param dtype the dtype of the value. - * @return a new instance of ReadVariableOp + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterMax */ - public ReadVariableOp readVariableOp(Operand resource, - DataType dtype) { - return ReadVariableOp.create(scope, resource, dtype); + public ScatterMax scatterMax(Operand ref, + Operand indices, Operand updates, ScatterMax.Options... options) { + return ScatterMax.create(scope, ref, indices, updates, options); } /** - * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces sparse updates into a variable reference using the `min` operation. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. * @param options carries optional attributes values - * @return a new instance of ReduceAll + * @return a new instance of ScatterMin */ - public ReduceAll reduceAll(Operand input, Operand axis, - ReduceAll.Options... options) { - return ReduceAll.create(scope, input, axis, options); + public ScatterMin scatterMin(Operand ref, + Operand indices, Operand updates, ScatterMin.Options... options) { + return ScatterMin.create(scope, ref, indices, updates, options); } /** - * Computes the "logical or" of elements across dimensions of a tensor. + * Multiplies sparse updates into a variable reference. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * This operation computes + *

      {@code
      +   *      # Scalar indices
      +   *      ref[indices, ...] *= updates[...]
          *
      -   * @param input The tensor to reduce.
      -   * @param axis The dimensions to reduce. Must be in the range
      -   *  `[-rank(input), rank(input))`.
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] *= updates[i, ...]
      +   *
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
      +   *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to multiply to `ref`. * @param options carries optional attributes values - * @return a new instance of ReduceAny + * @return a new instance of ScatterMul */ - public ReduceAny reduceAny(Operand input, Operand axis, - ReduceAny.Options... options) { - return ReduceAny.create(scope, input, axis, options); + public ScatterMul scatterMul(Operand ref, + Operand indices, Operand updates, ScatterMul.Options... options) { + return ScatterMul.create(scope, ref, indices, updates, options); } /** - * Computes the maximum of elements across dimensions of a tensor. + * Scatter `updates` into a new tensor according to `indices`. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * Creates a new tensor by applying sparse `updates` to individual values or + * slices within a tensor (initially zero for numeric, empty for string) of + * the given `shape` according to indices. This operator is the inverse of the + * `tf.gather_nd` operator which extracts values or slices from a given tensor. + *

      + * This operation is similar to tensor_scatter_add, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical + * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + *

      + * If `indices` contains duplicates, then their updates are accumulated (summed). + *

      + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + *

      + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + *

      + * indices.shape[-1] <= shape.rank + *

      + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + *

      + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

      + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + *

      + *

      + * + *
      + *

      + * In Python, this scatter operation would look like this: + *

      {@code
      +   *      indices = tf.constant([[4], [3], [1], [7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      shape = tf.constant([8])
      +   *      scatter = tf.scatter_nd(indices, updates, shape)
      +   *      print(scatter)
      +   *  }
      + * The resulting tensor would look like this: + *

      + * [0, 11, 0, 10, 9, 0, 0, 12] + *

      + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

      + *

      + * + *
      + *

      + * In Python, this scatter operation would look like this: + *

      {@code
      +   *      indices = tf.constant([[0], [2]])
      +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
      +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
      +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
      +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      +   *      shape = tf.constant([4, 4, 4])
      +   *      scatter = tf.scatter_nd(indices, updates, shape)
      +   *      print(scatter)
      +   *  }
      + * The resulting tensor would look like this: + *

      + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + *

      + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of ReduceMax + * @param data type for {@code output()} output + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param shape 1-D. The shape of the resulting tensor. + * @return a new instance of ScatterNd */ - public ReduceMax reduceMax(Operand input, - Operand axis, ReduceMax.Options... options) { - return ReduceMax.create(scope, input, axis, options); + public ScatterNd scatterNd(Operand indices, + Operand updates, Operand shape) { + return ScatterNd.create(scope, indices, updates, shape); } /** - * Computes the minimum of elements across dimensions of a tensor. + * Applies sparse addition to individual values or slices in a Variable. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  }
      + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + *
      {@code
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  add = tf.scatter_nd_add(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(add)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. * @param options carries optional attributes values - * @return a new instance of ReduceMin + * @return a new instance of ScatterNdAdd */ - public ReduceMin reduceMin(Operand input, - Operand axis, ReduceMin.Options... options) { - return ReduceMin.create(scope, input, axis, options); + public ScatterNdAdd scatterNdAdd(Operand ref, + Operand indices, Operand updates, ScatterNdAdd.Options... options) { + return ScatterNdAdd.create(scope, ref, indices, updates, options); } /** - * Computes the product of elements across dimensions of a tensor. + * Applies sparse addition to `input` using individual values or slices *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * from `updates` according to indices `indices`. The updates are non-aliasing: + * `input` is only modified in-place if no other operations will use it. + * Otherwise, a copy of `input` is made. This operation has a gradient with + * respect to both `input` and `updates`. + *

      + * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `input`. + * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or `(P-K)`-dimensional slices + * (if `K < P`) along the `K`th dimension of `input`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      + * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + *

      + * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + * elements. In Python, that addition would look like this: + *

      + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + * with tf.Session() as sess: + * print(sess.run(output)) + *

      + * The resulting value `output` would look like this: + *

      + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to slices. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of ReduceProd + * @param input A Tensor. + * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. + * A tensor of indices into `input`. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to `input`. + * @return a new instance of ScatterNdNonAliasingAdd */ - public ReduceProd reduceProd(Operand input, - Operand axis, ReduceProd.Options... options) { - return ReduceProd.create(scope, input, axis, options); + public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd( + Operand input, Operand indices, Operand updates) { + return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); } /** - * Computes the sum of elements across dimensions of a tensor. + * Applies sparse subtraction to individual values or slices in a Variable. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * within a given variable according to `indices`. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  }
      + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + *
      {@code
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(sub)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, -9, 3, -6, -4, 6, 7, -4] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to subtract from ref. * @param options carries optional attributes values - * @return a new instance of ReduceSum + * @return a new instance of ScatterNdSub */ - public ReduceSum reduceSum(Operand input, - Operand axis, ReduceSum.Options... options) { - return ReduceSum.create(scope, input, axis, options); - } - - /** - * Makes its input available to the next iteration. - * - * @param data type for {@code output()} output - * @param data The tensor to be made available to the next iteration. - * @return a new instance of RefNextIteration - */ - public RefNextIteration refNextIteration(Operand data) { - return RefNextIteration.create(scope, data); - } - - /** - * Forwards the `index`th element of `inputs` to `output`. - * - * @param data type for {@code output()} output - * @param index A scalar that determines the input that gets selected. - * @param inputs A list of ref tensors, one of which will be forwarded to `output`. - * @return a new instance of RefSelect - */ - public RefSelect refSelect(Operand index, - Iterable> inputs) { - return RefSelect.create(scope, index, inputs); + public ScatterNdSub scatterNdSub(Operand ref, + Operand indices, Operand updates, ScatterNdSub.Options... options) { + return ScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Forwards the ref tensor `data` to the output port determined by `pred`. - *

      - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * Applies sparse `updates` to individual values or slices within a given *

      - * See also `Switch` and `Merge`. - * - * @param data type for {@code outputFalse()} output - * @param data The ref tensor to be forwarded to the appropriate output. - * @param pred A scalar that specifies which output port will receive data. - * @return a new instance of RefSwitch - */ - public RefSwitch refSwitch(Operand data, Operand pred) { - return RefSwitch.create(scope, data, pred); - } - - /** - * Execute a sub graph on a remote processor. + * variable according to `indices`. *

      - * The graph specifications(such as graph itself, input tensors and output names) - * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo - * as serialized_remote_fused_graph_execute_info. - * The specifications will be passed to a dedicated registered - * remote fused graph executor. The executor will send the graph specifications - * to a remote processor and execute that graph. The execution results - * will be passed to consumer nodes as outputs of this node. - * - * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs - * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer - * of RemoteFusedGraphExecuteInfo which contains graph specifications. - * @return a new instance of RemoteFusedGraphExecute - */ - public RemoteFusedGraphExecute remoteFusedGraphExecute(Iterable> inputs, - List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { - return RemoteFusedGraphExecute.create(scope, inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo); - } - - /** - * Reshapes a tensor. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. *

      - * Given `tensor`, this operation returns a tensor that has the same values - * as `tensor` with shape `shape`. + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. *

      - * If one component of 1-D tensor `shape` is the special value -1, the size of that - * dimension is computed so that the total size remains constant. In particular, a - * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be - * unknown. + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. *

      - * The `shape` must be 1-D and the operation returns a tensor with shape - * `shape` filled with the values of `tensor`. In this case, the number of elements - * implied by `shape` must be the same as the number of elements in `tensor`. + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      - * It is an error if `shape` is not 1-D. + * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ *

      - * For example: + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: *

      {@code
      -   *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
      -   *  # tensor 't' has shape [9]
      -   *  reshape(t, [3, 3]) ==> [[1, 2, 3],
      -   *                          [4, 5, 6],
      -   *                          [7, 8, 9]]
      -   *
      -   *  # tensor 't' is [[[1, 1], [2, 2]],
      -   *  #                [[3, 3], [4, 4]]]
      -   *  # tensor 't' has shape [2, 2, 2]
      -   *  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
      -   *                          [3, 3, 4, 4]]
      -   *
      -   *  # tensor 't' is [[[1, 1, 1],
      -   *  #                 [2, 2, 2]],
      -   *  #                [[3, 3, 3],
      -   *  #                 [4, 4, 4]],
      -   *  #                [[5, 5, 5],
      -   *  #                 [6, 6, 6]]]
      -   *  # tensor 't' has shape [3, 2, 3]
      -   *  # pass '[-1]' to flatten 't'
      -   *  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
      -   *
      -   *  # -1 can also be used to infer the shape
      -   *
      -   *  # -1 is inferred to be 9:
      -   *  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      -   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      -   *  # -1 is inferred to be 2:
      -   *  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      -   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      -   *  # -1 is inferred to be 3:
      -   *  reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
      -   *                                [2, 2, 2],
      -   *                                [3, 3, 3]],
      -   *                               [[4, 4, 4],
      -   *                                [5, 5, 5],
      -   *                                [6, 6, 6]]]
      -   *
      -   *  # tensor 't' is [7]
      -   *  # shape `[]` reshapes to a scalar
      -   *  reshape(t, []) ==> 7
      +   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *      indices = tf.constant([[4], [3], [1] ,[7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      update = tf.scatter_nd_update(ref, indices, updates)
      +   *      with tf.Session() as sess:
      +   *        print sess.run(update)
          *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 11, 3, 10, 9, 6, 7, 12] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + *

      + * See also `tf.scatter_update` and `tf.batch_scatter_update`. * - * @param data type for {@code output()} output - * @param tensor - * @param shape Defines the shape of the output tensor. - * @return a new instance of Reshape - */ - public Reshape reshape(Operand tensor, - Operand shape) { - return Reshape.create(scope, tensor, shape); - } - - /** - * Increments variable pointed to by 'resource' until it reaches 'limit'. - * - * @param data type for {@code output()} output - * @param resource Should be from a scalar `Variable` node. - * @param limit If incrementing ref would bring it above limit, instead generates an - * 'OutOfRange' error. - * @param T - * @return a new instance of ResourceCountUpTo + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ScatterNdUpdate */ - public ResourceCountUpTo resourceCountUpTo(Operand resource, Long limit, - DataType T) { - return ResourceCountUpTo.create(scope, resource, limit, T); + public ScatterNdUpdate scatterNdUpdate(Operand ref, + Operand indices, Operand updates, ScatterNdUpdate.Options... options) { + return ScatterNdUpdate.create(scope, ref, indices, updates, options); } /** - * Gather slices from the variable pointed to by `resource` according to `indices`. + * Subtracts sparse updates to a variable reference. *

      - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: *

      {@code
          *      # Scalar indices
      -   *      output[:, ..., :] = params[indices, :, ... :]
      +   *      ref[indices, ...] -= updates[...]
          *
      -   *      # Vector indices
      -   *      output[i, :, ..., :] = params[indices[i], :, ... :]
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] -= updates[i, ...]
          *
      -   *      # Higher rank indices
      -   *      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
          *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their (negated) contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param resource - * @param indices - * @param dtype + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to subtract from `ref`. * @param options carries optional attributes values - * @return a new instance of ResourceGather - */ - public ResourceGather resourceGather(Operand resource, - Operand indices, DataType dtype, ResourceGather.Options... options) { - return ResourceGather.create(scope, resource, indices, dtype, options); - } - - /** - * - * @param data type for {@code output()} output - * @param resource - * @param indices - * @param dtype - * @return a new instance of ResourceGatherNd + * @return a new instance of ScatterSub */ - public ResourceGatherNd resourceGatherNd( - Operand resource, Operand indices, DataType dtype) { - return ResourceGatherNd.create(scope, resource, indices, dtype); + public ScatterSub scatterSub(Operand ref, + Operand indices, Operand updates, ScatterSub.Options... options) { + return ScatterSub.create(scope, ref, indices, updates, options); } /** - * Adds sparse updates to the variable referenced by `resource`. + * Applies sparse updates to a variable reference. *

      * This operation computes - *

      + *

      {@code
          *      # Scalar indices
      -   *      ref[indices, ...] += updates[...]
      -   *  

      + * ref[indices, ...] = updates[...] + * * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] - *

      + * ref[indices[i], ...] = updates[i, ...] + * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * }

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. + * If values in `ref` is to be updated more than once, because there are + * duplicate entries in `indices`, the order at which the updates happen + * for each value is undefined. *

      * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *

      *

      - * + * *
      + *

      + * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. * - * @param resource Should be from a `Variable` node. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterAdd + * @param updates A tensor of updated values to store in `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterUpdate */ - public ResourceScatterAdd resourceScatterAdd( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterAdd.create(scope, resource, indices, updates); + public ScatterUpdate scatterUpdate(Operand ref, + Operand indices, Operand updates, ScatterUpdate.Options... options) { + return ScatterUpdate.create(scope, ref, indices, updates, options); } /** - * Divides sparse updates into the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] /= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterDiv + * @param data type for {@code output()} output + * @param condition + * @param t + * @param e + * @return a new instance of Select */ - public ResourceScatterDiv resourceScatterDiv( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterDiv.create(scope, resource, indices, updates); + public Select select(Operand condition, Operand t, Operand e) { + return Select.create(scope, condition, t, e); } /** - * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * Computes the difference between two lists of numbers or strings. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions are combined. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` *

      - *

      - * - *
      + * For example, given this input: + *
      {@code
      +   *  x = [1, 2, 3, 4, 5, 6]
      +   *  y = [1, 3, 5]
      +   *  }
      + * This operation would return: + *
      {@code
      +   *  out ==> [2, 4, 6]
      +   *  idx ==> [1, 3, 5]
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMax + * @param data type for {@code out()} output + * @param data type for {@code idx()} output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @return a new instance of SetDiff1d */ - public ResourceScatterMax resourceScatterMax( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMax.create(scope, resource, indices, updates); + public SetDiff1d setDiff1d(Operand x, Operand y) { + return SetDiff1d.create(scope, x, y); } /** - * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * Computes the difference between two lists of numbers or strings. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions are combined. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` *

      - *

      - * - *
      + * For example, given this input: + *
      {@code
      +   *  x = [1, 2, 3, 4, 5, 6]
      +   *  y = [1, 3, 5]
      +   *  }
      + * This operation would return: + *
      {@code
      +   *  out ==> [2, 4, 6]
      +   *  idx ==> [1, 3, 5]
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMin + * @param data type for {@code out()} output + * @param data type for {@code idx()} output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param outIdx + * @return a new instance of SetDiff1d */ - public ResourceScatterMin resourceScatterMin( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMin.create(scope, resource, indices, updates); + public SetDiff1d setDiff1d(Operand x, Operand y, + DataType outIdx) { + return SetDiff1d.create(scope, x, y, outIdx); } /** - * Multiplies sparse updates into the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] *= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. + * Number of unique elements along last dimension of input `set`. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + * and `set_shape`. The last dimension contains values in a set, duplicates are + * allowed but ignored. *

      - *

      - * - *
      + * If `validate_indices` is `True`, this op validates the order and range of `set` + * indices. * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMul + * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. + * @param setValues 1D `Tensor`, values of a `SparseTensor`. + * @param setShape 1D `Tensor`, shape of a `SparseTensor`. + * @param options carries optional attributes values + * @return a new instance of SetSize */ - public ResourceScatterMul resourceScatterMul( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMul.create(scope, resource, indices, updates); + public SetSize setSize(Operand setIndices, Operand setValues, + Operand setShape, SetSize.Options... options) { + return SetSize.create(scope, setIndices, setValues, setShape, options); } /** - * Applies sparse addition to individual values or slices in a Variable. - *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * Returns the shape of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns a 1-D integer tensor representing the shape of `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      -   *  }
      - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: + * For example: *
      {@code
      -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      -   *  indices = tf.constant([[4], [3], [1], [7]])
      -   *  updates = tf.constant([9, 10, 11, 12])
      -   *  add = tf.scatter_nd_add(ref, indices, updates)
      -   *  with tf.Session() as sess:
      -   *    print sess.run(add)
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  shape(t) ==> [2, 2, 3]
          *  }
      - * The resulting update to ref would look like this: - *

      - * [1, 13, 3, 14, 14, 6, 7, 20] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdAdd + * @param data type for {@code output()} output + * @param input + * @return a new instance of Shape */ - public ResourceScatterNdAdd resourceScatterNdAdd( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdAdd.Options... options) { - return ResourceScatterNdAdd.create(scope, ref, indices, updates, options); + public org.tensorflow.op.core.Shape shape(Operand input) { + return org.tensorflow.op.core.Shape.create(scope, input); } /** - * Applies sparse subtraction to individual values or slices in a Variable. - *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * Returns the shape of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns a 1-D integer tensor representing the shape of `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * For example: *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  shape(t) ==> [2, 2, 3]
          *  }
      - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: - *
      {@code
      -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      -   *  indices = tf.constant([[4], [3], [1], [7]])
      -   *  updates = tf.constant([9, 10, 11, 12])
      -   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      -   *  with tf.Session() as sess:
      -   *    print sess.run(sub)
      -   *  }
      - * The resulting update to ref would look like this: - *

      - * [1, -9, 3, -6, -4, 6, 7, -4] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdSub + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of Shape */ - public ResourceScatterNdSub resourceScatterNdSub( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdSub.Options... options) { - return ResourceScatterNdSub.create(scope, ref, indices, updates, options); + public org.tensorflow.op.core.Shape shape( + Operand input, DataType outType) { + return org.tensorflow.op.core.Shape.create(scope, input, outType); } /** - * Applies sparse `updates` to individual values or slices within a given - *

      - * variable according to `indices`. + * Returns shape of tensors. *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for {@code output()} output + * @param input + * @return a new instance of ShapeN + */ + public ShapeN shapeN(Iterable> input) { + return ShapeN.create(scope, input); + } + + /** + * Returns shape of tensors. *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of ShapeN + */ + public ShapeN shapeN(Iterable> input, + DataType outType) { + return ShapeN.create(scope, input, outType); + } + + /** + * Returns the size of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns an integer representing the number of elements in + * `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
      -   *  }
      - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: + * For example: *
      {@code
      -   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      -   *      indices = tf.constant([[4], [3], [1] ,[7]])
      -   *      updates = tf.constant([9, 10, 11, 12])
      -   *      update = tf.scatter_nd_update(ref, indices, updates)
      -   *      with tf.Session() as sess:
      -   *        print sess.run(update)
      +   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
      +   *  size(t) ==> 12
          *  }
      - * The resulting update to ref would look like this: - *

      - * [1, 11, 3, 10, 9, 6, 7, 12] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdUpdate + * @param data type for {@code output()} output + * @param input + * @return a new instance of Size */ - public ResourceScatterNdUpdate resourceScatterNdUpdate( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdUpdate.Options... options) { - return ResourceScatterNdUpdate.create(scope, ref, indices, updates, options); + public Size size(Operand input) { + return Size.create(scope, input); } /** - * Subtracts sparse updates from the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] -= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. + * Returns the size of a tensor. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * This operation returns an integer representing the number of elements in + * `input`. *

      - *

      - * - *
      + * For example: + *
      {@code
      +   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
      +   *  size(t) ==> 12
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterSub + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of Size */ - public ResourceScatterSub resourceScatterSub( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterSub.create(scope, resource, indices, updates); + public Size size(Operand input, DataType outType) { + return Size.create(scope, input, outType); } /** - * Assigns sparse updates to the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * Parses a text file and creates a batch of examples. * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterUpdate + * @param filename The corpus's text file name. + * @param batchSize The size of produced batch. + * @param options carries optional attributes values + * @return a new instance of Skipgram */ - public ResourceScatterUpdate resourceScatterUpdate( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterUpdate.create(scope, resource, indices, updates); + public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... options) { + return Skipgram.create(scope, filename, batchSize, options); } /** - * Assign `value` to the sliced l-value reference of `ref`. + * Return a slice from 'input'. *

      - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * The output tensor is a tensor with dimensions described by 'size' + * whose values are extracted from 'input' starting at the offsets in + * 'begin'. *

      - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. + * Requirements: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of ResourceStridedSliceAssign + * @param data type for {@code output()} output + * @param input + * @param begin begin[i] specifies the offset into the 'i'th dimension of + * 'input' to slice from. + * @param size size[i] specifies the number of elements of the 'i'th dimension + * of 'input' to slice. If size[i] is -1, all remaining elements in dimension + * i are included in the slice (i.e. this is equivalent to setting + * size[i] = input.dim_size(i) - begin[i]). + * @return a new instance of Slice */ - public ResourceStridedSliceAssign resourceStridedSliceAssign( - Operand ref, Operand begin, Operand end, Operand strides, Operand value, - ResourceStridedSliceAssign.Options... options) { - return ResourceStridedSliceAssign.create(scope, ref, begin, end, strides, value, options); + public Slice slice(Operand input, Operand begin, + Operand size) { + return Slice.create(scope, input, begin, size); } /** - * Reverses specific dimensions of a tensor. + * Returns a copy of the input tensor. + * + * @param data type for {@code output()} output + * @param input + * @return a new instance of Snapshot + */ + public Snapshot snapshot(Operand input) { + return Snapshot.create(scope, input); + } + + /** + * SpaceToBatch for N-D tensors of type T. *

      - * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. - * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. + * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + * grid of blocks of shape `block_shape`, and interleaves these blocks with the + * "batch" dimension (0) such that in the output, the spatial dimensions + * `[1, ..., M]` correspond to the position within the grid, and the batch + * dimension combines both the position within a spatial block and the original + * batch position. Prior to division into blocks, the spatial dimensions of the + * input are optionally zero padded according to `paddings`. See below for a + * precise description. + * + * @param data type for {@code output()} output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + * where spatial_shape has `M` dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. *

      - * Given a `tensor`, and a `int32` tensor `axis` representing the set of - * dimensions of `tensor` to reverse. This operation reverses each dimension - * `i` for which there exists `j` s.t. `axis[j] == i`. + * This operation is equivalent to the following steps: *

      - * `tensor` can have up to 8 dimensions. The number of dimensions specified - * in `axis` may be 0 or more entries. If an index is specified more than - * once, a InvalidArgument error is raised. + * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + * input according to `paddings` to produce `padded` of shape `padded_shape`. *

      - * For example: - *

      {@code
      -   *  # tensor 't' is [[[[ 0,  1,  2,  3],
      -   *  #                  [ 4,  5,  6,  7],
      -   *  #                  [ 8,  9, 10, 11]],
      -   *  #                 [[12, 13, 14, 15],
      -   *  #                  [16, 17, 18, 19],
      -   *  #                  [20, 21, 22, 23]]]]
      -   *  # tensor 't' shape is [1, 2, 3, 4]
      -   *
      -   *  # 'dims' is [3] or 'dims' is [-1]
      -   *  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
      -   *                          [ 7,  6,  5,  4],
      -   *                          [ 11, 10, 9, 8]],
      -   *                         [[15, 14, 13, 12],
      -   *                          [19, 18, 17, 16],
      -   *                          [23, 22, 21, 20]]]]
      -   *
      -   *  # 'dims' is '[1]' (or 'dims' is '[-3]')
      -   *  reverse(t, dims) ==> [[[[12, 13, 14, 15],
      -   *                          [16, 17, 18, 19],
      -   *                          [20, 21, 22, 23]
      -   *                         [[ 0,  1,  2,  3],
      -   *                          [ 4,  5,  6,  7],
      -   *                          [ 8,  9, 10, 11]]]]
      -   *
      -   *  # 'dims' is '[2]' (or 'dims' is '[-2]')
      -   *  reverse(t, dims) ==> [[[[8, 9, 10, 11],
      -   *                          [4, 5, 6, 7],
      -   *                          [0, 1, 2, 3]]
      -   *                         [[20, 21, 22, 23],
      -   *                          [16, 17, 18, 19],
      -   *                          [12, 13, 14, 15]]]]
      +   *  2. Reshape `padded` to `reshaped_padded` of shape:
      +   *  

      + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + + * remaining_shape + *

      + * 3. Permute dimensions of `reshaped_padded` to produce + * `permuted_reshaped_padded` of shape: + *

      + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *

      + * 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + * dimension, producing an output tensor of shape: + *

      + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *

      + * Some examples: + *

      + * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *

      {@code
      +   *  x = [[[[1], [2]], [[3], [4]]]]
      +   *  }
      + * The output tensor has shape `[4, 1, 1, 1]` and value: + *
      {@code
      +   *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
          *  }
      + * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *
      {@code
      +   *  x = [[[[1, 2, 3], [4, 5, 6]],
      +   *        [[7, 8, 9], [10, 11, 12]]]]
      +   *  }
      + * The output tensor has shape `[4, 1, 1, 3]` and value: + *
      {@code
      +   *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
      +   *  }
      + * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *
      {@code
      +   *  x = [[[[1],   [2],  [3],  [4]],
      +   *        [[5],   [6],  [7],  [8]],
      +   *        [[9],  [10], [11],  [12]],
      +   *        [[13], [14], [15],  [16]]]]
      +   *  }
      + * The output tensor has shape `[4, 2, 2, 1]` and value: + *
      {@code
      +   *  x = [[[[1], [3]], [[9], [11]]],
      +   *       [[[2], [4]], [[10], [12]]],
      +   *       [[[5], [7]], [[13], [15]]],
      +   *       [[[6], [8]], [[14], [16]]]]
      +   *  }
      + * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + * paddings = `[[0, 0], [2, 0]]`: + *
      {@code
      +   *  x = [[[[1],   [2],  [3],  [4]],
      +   *        [[5],   [6],  [7],  [8]]],
      +   *       [[[9],  [10], [11],  [12]],
      +   *        [[13], [14], [15],  [16]]]]
      +   *  }
      + * The output tensor has shape `[8, 1, 3, 1]` and value: + *
      {@code
      +   *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
      +   *       [[[0], [2], [4]]], [[[0], [10], [12]]],
      +   *       [[[0], [5], [7]]], [[[0], [13], [15]]],
      +   *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
      +   *  }
      + * Among others, this operation is useful for reducing atrous convolution into + * regular convolution. + * @return a new instance of SpaceToBatchNd + */ + public SpaceToBatchNd spaceToBatchNd( + Operand input, Operand blockShape, Operand paddings) { + return SpaceToBatchNd.create(scope, input, blockShape, paddings); + } + + /** + * Splits a tensor into `num_split` tensors along one dimension. * * @param data type for {@code output()} output - * @param tensor Up to 8-D. - * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range - * `[-rank(tensor), rank(tensor))`. - * @return a new instance of Reverse + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param value The tensor to split. + * @param numSplit The number of ways to split. Must evenly divide + * `value.shape[split_dim]`. + * @return a new instance of Split */ - public Reverse reverse(Operand tensor, - Operand axis) { - return Reverse.create(scope, tensor, axis); + public Split split(Operand axis, Operand value, Long numSplit) { + return Split.create(scope, axis, value, numSplit); } /** - * Reverses variable length slices. - *

      - * This op first slices `input` along the dimension `batch_dim`, and for each - * slice `i`, reverses the first `seq_lengths[i]` elements along - * the dimension `seq_dim`. - *

      - * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, - * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param data type for {@code output()} output + * @param value The tensor to split. + * @param sizeSplits list containing the sizes of each output tensor along the split + * dimension. Must sum to the dimension of value along split_dim. + * Can contain one -1 indicating that dimension is to be inferred. + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param numSplit + * @return a new instance of SplitV + */ + public SplitV splitV(Operand value, + Operand sizeSplits, Operand axis, Long numSplit) { + return SplitV.create(scope, value, sizeSplits, axis, numSplit); + } + + /** + * Removes dimensions of size 1 from the shape of a tensor. *

      - * The output slice `i` along dimension `batch_dim` is then given by input - * slice `i`, with the first `seq_lengths[i]` slices along dimension - * `seq_dim` reversed. + * Given a tensor `input`, this operation returns a tensor of the same type with + * all dimensions of size 1 removed. If you don't want to remove all size 1 + * dimensions, you can remove specific size 1 dimensions by specifying + * `axis`. *

      * For example: *

      {@code
      -   *  # Given this:
      -   *  batch_dim = 0
      -   *  seq_dim = 1
      -   *  input.dims = (4, 8, ...)
      -   *  seq_lengths = [7, 2, 3, 5]
      -   *
      -   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      -   *  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
      -   *  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
      -   *  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
      -   *  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
      -   *
      -   *  # while entries past seq_lens are copied through:
      -   *  output[0, 7:, :, ...] = input[0, 7:, :, ...]
      -   *  output[1, 2:, :, ...] = input[1, 2:, :, ...]
      -   *  output[2, 3:, :, ...] = input[2, 3:, :, ...]
      -   *  output[3, 2:, :, ...] = input[3, 2:, :, ...]
      +   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
      +   *  shape(squeeze(t)) ==> [2, 3]
          *  }
      - * In contrast, if: + * Or, to remove specific size 1 dimensions: *
      {@code
      -   *  # Given this:
      -   *  batch_dim = 2
      -   *  seq_dim = 0
      -   *  input.dims = (8, ?, 4, ...)
      -   *  seq_lengths = [7, 2, 3, 5]
      -   *
      -   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      -   *  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
      -   *  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
      -   *  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
      -   *  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
      -   *
      -   *  # while entries past seq_lens are copied through:
      -   *  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
      -   *  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
      -   *  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
      -   *  output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
      +   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
      +   *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
          *  }
      * * @param data type for {@code output()} output - * @param input The input to reverse. - * @param seqLengths 1-D with length `input.dims(batch_dim)` and - * `max(seq_lengths) <= input.dims(seq_dim)` - * @param seqDim The dimension which is partially reversed. + * @param input The `input` to squeeze. * @param options carries optional attributes values - * @return a new instance of ReverseSequence + * @return a new instance of Squeeze */ - public ReverseSequence reverseSequence(Operand input, - Operand seqLengths, Long seqDim, ReverseSequence.Options... options) { - return ReverseSequence.create(scope, input, seqLengths, seqDim, options); + public Squeeze squeeze(Operand input, Squeeze.Options... options) { + return Squeeze.create(scope, input, options); } /** - * Rolls the elements of a tensor along an axis. + * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. *

      - * The elements are shifted positively (towards larger indices) by the offset of - * `shift` along the dimension of `axis`. Negative `shift` values will shift - * elements in the opposite direction. Elements that roll passed the last position - * will wrap around to the first and vice versa. Multiple shifts along multiple - * axes may be specified. + * Packs the `N` tensors in `values` into a tensor with rank one higher than each + * tensor in `values`, by packing them along the `axis` dimension. + * Given a list of tensors of shape `(A, B, C)`; + *

      + * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + * Etc. *

      * For example: *

      {@code
      -   *  # 't' is [0, 1, 2, 3, 4]
      -   *  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
      -   *
      -   *  # shifting along multiple dimensions
      -   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      -   *  roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
      -   *
      -   *  # shifting along the same axis multiple times
      -   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      -   *  roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
      +   *  # 'x' is [1, 4]
      +   *  # 'y' is [2, 5]
      +   *  # 'z' is [3, 6]
      +   *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
      +   *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
          *  }
      + * This is the opposite of `unpack`. * * @param data type for {@code output()} output - * @param input - * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which - * elements are shifted positively (towards larger indices) along the dimension - * specified by `axis[i]`. Negative shifts will roll the elements in the opposite - * direction. - * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift - * `shift[i]` should occur. If the same axis is referenced more than once, the - * total shift for that axis will be the sum of all the shifts that belong to that - * axis. - * @return a new instance of Roll + * @param values Must be of same shape and type. + * @param options carries optional attributes values + * @return a new instance of Stack */ - public Roll roll(Operand input, - Operand shift, Operand axis) { - return Roll.create(scope, input, shift, axis); + public Stack stack(Iterable> values, Stack.Options... options) { + return Stack.create(scope, values, options); } /** - * Perform batches of RPC requests. - *

      - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *

      - * - `address` (the host+port or BNS address of the request) - * - `method` (the RPC method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - *

      - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

      {@code
      -   *  service MyService {
      -   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
      -   *    }
      -   *  };
      -   *  }
      - * then call this op with arguments: - *
      {@code
      -   *  address = "localhost:2345"
      -   *  method = "MyService/MyMethod"
      -   *  }
      - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - *

      - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. - *

      - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - *

      - * NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

      - * If the connection fails or the remote worker returns an error - * status, the op reraises this exception locally. + * Stage values similar to a lightweight Enqueue. *

      - * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. + * The basic functionality of this Op is similar to a queue with many + * fewer capabilities and options. This Op is optimized for performance. * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. * @param options carries optional attributes values - * @return a new instance of Rpc + * @return a new instance of Stage */ - public Rpc rpc(Operand address, Operand method, Operand request, - Rpc.Options... options) { - return Rpc.create(scope, address, method, request, options); + public Stage stage(Iterable> values, Stage.Options... options) { + return Stage.create(scope, values, options); } /** - * Adds sparse updates to a variable reference. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] += updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      + * Op removes all elements in the underlying container. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterAdd + * @return a new instance of StageClear */ - public ScatterAdd scatterAdd(Operand ref, - Operand indices, Operand updates, ScatterAdd.Options... options) { - return ScatterAdd.create(scope, ref, indices, updates, options); + public StageClear stageClear(List> dtypes, StageClear.Options... options) { + return StageClear.create(scope, dtypes, options); } /** - * Divides a variable reference by sparse updates. - *

      - * This operation computes - *

      {@code
      -   *      # Scalar indices
      -   *      ref[indices, ...] /= updates[...]
      -   *
      -   *      # Vector indices (for each i)
      -   *      ref[indices[i], ...] /= updates[i, ...]
      -   *
      -   *      # High rank indices (for each i, ..., j)
      -   *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
      -   *  }
      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions divide. + * Op peeks at the values at the specified index. If the *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * underlying container does not contain sufficient elements + * this op will block until it does. This Op is optimized for + * performance. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of values that `ref` is divided by. + * @param index + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterDiv + * @return a new instance of StagePeek */ - public ScatterDiv scatterDiv(Operand ref, - Operand indices, Operand updates, ScatterDiv.Options... options) { - return ScatterDiv.create(scope, ref, indices, updates, options); + public StagePeek stagePeek(Operand index, List> dtypes, + StagePeek.Options... options) { + return StagePeek.create(scope, index, dtypes, options); } /** - * Reduces sparse updates into a variable reference using the `max` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions combine. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      + * Op returns the number of elements in the underlying container. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterMax + * @return a new instance of StageSize */ - public ScatterMax scatterMax(Operand ref, - Operand indices, Operand updates, ScatterMax.Options... options) { - return ScatterMax.create(scope, ref, indices, updates, options); + public StageSize stageSize(List> dtypes, StageSize.Options... options) { + return StageSize.create(scope, dtypes, options); } /** - * Reduces sparse updates into a variable reference using the `min` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * Stops gradient computation. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions combine. + * When executed in a graph, this op outputs its input tensor as-is. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * When building ops to compute gradients, this op prevents the contribution of + * its inputs to be taken into account. Normally, the gradient generator adds ops + * to a graph to compute the derivatives of a specified 'loss' by recursively + * finding out inputs that contributed to its computation. If you insert this op + * in the graph it inputs are masked from the gradient generator. They are not + * taken into account for computing gradients. *

      - *

      - * - *
      + * This is useful any time you want to compute a value with TensorFlow but need + * to pretend that the value was a constant. Some examples include: + *
        + *
      • + * The EM algorithm where the M-step should not involve backpropagation + * through the output of the E-step. + *
      • + *
      • + * Contrastive divergence training of Boltzmann machines where, when + * differentiating the energy function, the training must not backpropagate + * through the graph that generated the samples from the model. + *
      • + *
      • + * Adversarial training, where no backprop should happen through the adversarial + * example generation process. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. - * @param options carries optional attributes values - * @return a new instance of ScatterMin + * @param data type for {@code output()} output + * @param input + * @return a new instance of StopGradient */ - public ScatterMin scatterMin(Operand ref, - Operand indices, Operand updates, ScatterMin.Options... options) { - return ScatterMin.create(scope, ref, indices, updates, options); + public StopGradient stopGradient(Operand input) { + return StopGradient.create(scope, input); } /** - * Multiplies sparse updates into a variable reference. + * Return a strided slice from `input`. *

        - * This operation computes - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] *= updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] *= updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

        - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. - *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to multiply to `ref`. - * @param options carries optional attributes values - * @return a new instance of ScatterMul - */ - public ScatterMul scatterMul(Operand ref, - Operand indices, Operand updates, ScatterMul.Options... options) { - return ScatterMul.create(scope, ref, indices, updates, options); - } - - /** - * Scatter `updates` into a new tensor according to `indices`. - *

        - * Creates a new tensor by applying sparse `updates` to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given `shape` according to indices. This operator is the inverse of the - * `tf.gather_nd` operator which extracts values or slices from a given tensor. - *

        - * This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical - * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - *

        - * If `indices` contains duplicates, then their updates are accumulated (summed). - *

        - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

        - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * Note, most python users will want to use the Python `Tensor.__getitem__` + * or `Variable.__getitem__` rather than this op directly. *

        - * indices.shape[-1] <= shape.rank + * The goal of this op is to produce a new tensor with a subset of + * the elements from the `n` dimensional `input` tensor. The subset is chosen using + * a sequence of `m` sparse range specifications encoded into the arguments + * of this function. Note, in some cases + * `m` could be equal to `n`, but this need not be the case. Each + * range specification entry can be one of the following: *

        - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape + * - An ellipsis (...). Ellipses are used to imply zero or more + * dimensions of full-dimension selection and are produced using + * `ellipsis_mask`. For example, `foo[...]` is the identity slice. *

        - * indices.shape[:-1] + shape[indices.shape[-1]:] + * - A new axis. This is used to insert a new shape=1 dimension and is + * produced using `new_axis_mask`. For example, `foo[:, ...]` where + * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. *

        - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. + * - A range `begin:end:stride`. This is used to specify how much to choose from + * a given dimension. `stride` can be any integer but 0. `begin` is an integer + * which represents the index of the first value to select while `end` represents + * the index of the last value to select. The number of values selected in each + * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + * `begin` and `end` can be negative where `-1` is the last element, `-2` is + * the second to last. `begin_mask` controls whether to replace the explicitly + * given `begin` with an implicit effective value of `0` if `stride > 0` and + * `-1` if `stride < 0`. `end_mask` is analogous but produces the number + * required to create the largest open interval. For example, given a shape + * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + * first dimension of a tensor while dropping the last two (in the original + * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. *

        - *

        - * - *
        + * - A single index. This is used to keep only elements that have a given + * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + * shape `(6,)` tensor. This is encoded in `begin` and `end` and + * `shrink_axis_mask`. *

        - * In Python, this scatter operation would look like this: + * Each conceptual range specification is encoded in the op's argument. This + * encoding is best understand by considering a non-trivial example. In + * particular, + * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as *

        {@code
        -   *      indices = tf.constant([[4], [3], [1], [7]])
        -   *      updates = tf.constant([9, 10, 11, 12])
        -   *      shape = tf.constant([8])
        -   *      scatter = tf.scatter_nd(indices, updates, shape)
        -   *      print(scatter)
        +   *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
        +   *  end = [2, 4, x, x, -3, x]
        +   *  strides = [1, 1, x, x, -1, 1]
        +   *  begin_mask = 1<<4 | 1 << 5 = 48
        +   *  end_mask = 1<<5 = 32
        +   *  ellipsis_mask = 1<<3 = 8
        +   *  new_axis_mask = 1<<2 4
        +   *  shrink_axis_mask = 1<<0
            *  }
        - * The resulting tensor would look like this: + * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + * the slice becomes (2, 1, 5, 5, 2, 5). + * Let us walk step by step through each argument specification. *

        - * [0, 11, 0, 10, 9, 0, 0, 12] + * 1. The first argument in the example slice is turned into `begin = 1` and + * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + * also set the appropriate bit in `shrink_axis_mask`. *

        - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. + * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + * zero bits contributed. *

        - *

        - * - *
        + * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + * dimension in the final shape. Dummy values are contributed to begin, + * end and stride, while the new_axis_mask bit is set. *

        - * In Python, this scatter operation would look like this: - *

        {@code
        -   *      indices = tf.constant([[0], [2]])
        -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
        -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
        -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
        -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
        -   *      shape = tf.constant([4, 4, 4])
        -   *      scatter = tf.scatter_nd(indices, updates, shape)
        -   *      print(scatter)
        -   *  }
        - * The resulting tensor would look like this: + * 4. `...` grab the full ranges from as many dimensions as needed to + * fully specify a slice for every dimension of the input shape. *

        - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated + * with a dimension that has shape `s` is converted to a positive index + * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + * is done internally so begin, end and strides receive x, -3, and -1. + * The appropriate begin_mask bit is set to indicate the start range is the + * full range (ignoring the x). *

        - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * 6. `:` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + * `end_mask` are also set. + *

        + * Requirements: + * `0 != strides[i] for i in [0, m)` + * `ellipsis_mask must be a power of two (only one ellipsis)` * - * @param data type for {@code output()} output - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @param shape 1-D. The shape of the resulting tensor. - * @return a new instance of ScatterNd + * @param data type for {@code output()} output + * @param input + * @param begin `begin[k]` specifies the offset into the `k`th range specification. + * The exact dimension this corresponds to will be determined by context. + * Out-of-bounds values will be silently clamped. If the `k`th bit of + * `begin_mask` then `begin[k]` is ignored and the full range of the + * appropriate dimension is used instead. Negative values causes indexing + * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. + * @param end `end[i]` is like `begin` with the exception that `end_mask` is + * used to determine full ranges. + * @param strides `strides[i]` specifies the increment in the `i`th specification + * after extracting a given element. Negative indices will reverse + * the original order. Out or range values are + * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` + * @param options carries optional attributes values + * @return a new instance of StridedSlice */ - public ScatterNd scatterNd(Operand indices, - Operand updates, Operand shape) { - return ScatterNd.create(scope, indices, updates, shape); + public StridedSlice stridedSlice(Operand input, + Operand begin, Operand end, Operand strides, StridedSlice.Options... options) { + return StridedSlice.create(scope, input, begin, end, strides, options); } /** - * Applies sparse addition to individual values or slices in a Variable. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        {@code
        -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
        -   *  }
        - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: - *
        {@code
        -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *  indices = tf.constant([[4], [3], [1], [7]])
        -   *  updates = tf.constant([9, 10, 11, 12])
        -   *  add = tf.scatter_nd_add(ref, indices, updates)
        -   *  with tf.Session() as sess:
        -   *    print sess.run(add)
        -   *  }
        - * The resulting update to ref would look like this: + * Assign `value` to the sliced l-value reference of `ref`. *

        - * [1, 13, 3, 14, 14, 6, 7, 20] + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to ref. + * @param ref + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of ScatterNdAdd + * @return a new instance of StridedSliceAssign */ - public ScatterNdAdd scatterNdAdd(Operand ref, - Operand indices, Operand updates, ScatterNdAdd.Options... options) { - return ScatterNdAdd.create(scope, ref, indices, updates, options); + public StridedSliceAssign stridedSliceAssign( + Operand ref, Operand begin, Operand end, Operand strides, Operand value, + StridedSliceAssign.Options... options) { + return StridedSliceAssign.create(scope, ref, begin, end, strides, value, options); } /** - * Applies sparse addition to `input` using individual values or slices - *

        - * from `updates` according to indices `indices`. The updates are non-aliasing: - * `input` is only modified in-place if no other operations will use it. - * Otherwise, a copy of `input` is made. This operation has a gradient with - * respect to both `input` and `updates`. - *

        - * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `input`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + * Returns the gradient of `StridedSlice`. *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or `(P-K)`-dimensional slices - * (if `K < P`) along the `K`th dimension of `input`. + * Since `StridedSlice` cuts out pieces of its `input` which is size + * `shape`, its gradient will have the same shape (which is passed here + * as `shape`). The gradient will be zero in any element that the slice + * does not select. *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        - * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - *

        - * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 - * elements. In Python, that addition would look like this: - *

        - * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) - * with tf.Session() as sess: - * print(sess.run(output)) - *

        - * The resulting value `output` would look like this: - *

        - * [1, 13, 3, 14, 14, 6, 7, 20] - *

        - * See `tf.scatter_nd` for more details about how to make updates to slices. + * Arguments are the same as StridedSliceGrad with the exception that + * `dy` is the input gradient to be propagated and `shape` is the + * shape of `StridedSlice`'s `input`. * - * @param data type for {@code output()} output - * @param input A Tensor. - * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. - * A tensor of indices into `input`. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to `input`. - * @return a new instance of ScatterNdNonAliasingAdd + * @param data type for {@code output()} output + * @param shape + * @param begin + * @param end + * @param strides + * @param dy + * @param options carries optional attributes values + * @return a new instance of StridedSliceGrad */ - public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd( - Operand input, Operand indices, Operand updates) { - return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); + public StridedSliceGrad stridedSliceGrad(Operand shape, + Operand begin, Operand end, Operand strides, Operand dy, + StridedSliceGrad.Options... options) { + return StridedSliceGrad.create(scope, shape, begin, end, strides, dy, options); } /** - * Applies sparse subtraction to individual values or slices in a Variable. - *

        - * within a given variable according to `indices`. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        {@code
        -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
        -   *  }
        - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: - *
        {@code
        -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *  indices = tf.constant([[4], [3], [1], [7]])
        -   *  updates = tf.constant([9, 10, 11, 12])
        -   *  sub = tf.scatter_nd_sub(ref, indices, updates)
        -   *  with tf.Session() as sess:
        -   *    print sess.run(sub)
        -   *  }
        - * The resulting update to ref would look like this: - *

        - * [1, -9, 3, -6, -4, 6, 7, -4] + * Computes the sum of elements across dimensions of a tensor. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to subtract from ref. + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of ScatterNdSub + * @return a new instance of Sum */ - public ScatterNdSub scatterNdSub(Operand ref, - Operand indices, Operand updates, ScatterNdSub.Options... options) { - return ScatterNdSub.create(scope, ref, indices, updates, options); + public Sum sum(Operand input, Operand axis, + Sum.Options... options) { + return Sum.create(scope, input, axis, options); } /** - * Applies sparse `updates` to individual values or slices within a given - *

        - * variable according to `indices`. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        - * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - *

        - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: - *

        {@code
        -   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *      indices = tf.constant([[4], [3], [1] ,[7]])
        -   *      updates = tf.constant([9, 10, 11, 12])
        -   *      update = tf.scatter_nd_update(ref, indices, updates)
        -   *      with tf.Session() as sess:
        -   *        print sess.run(update)
        -   *  }
        - * The resulting update to ref would look like this: - *

        - * [1, 11, 3, 10, 9, 6, 7, 12] + * Forwards `data` to the output port determined by `pred`. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. *

        - * See also `tf.scatter_update` and `tf.batch_scatter_update`. + * See also `RefSwitch` and `Merge`. * - * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ScatterNdUpdate + * @param data type for {@code outputFalse()} output + * @param data The tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of SwitchCond */ - public ScatterNdUpdate scatterNdUpdate(Operand ref, - Operand indices, Operand updates, ScatterNdUpdate.Options... options) { - return ScatterNdUpdate.create(scope, ref, indices, updates, options); + public SwitchCond switchCond(Operand data, Operand pred) { + return SwitchCond.create(scope, data, pred); } /** - * Subtracts sparse updates to a variable reference. + * Returns a tensor that may be mutated, but only persists within a single step. *

        - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] -= updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] -= updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. *

        - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their (negated) contributions add. + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * Outputs a ref to the tensor state so it may be read or modified. *

        - *

        - * - *
        + * E.g. + * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to subtract from `ref`. + * @param data type for {@code ref()} output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. * @param options carries optional attributes values - * @return a new instance of ScatterSub + * @return a new instance of TemporaryVariable */ - public ScatterSub scatterSub(Operand ref, - Operand indices, Operand updates, ScatterSub.Options... options) { - return ScatterSub.create(scope, ref, indices, updates, options); + public TemporaryVariable temporaryVariable(Shape shape, DataType dtype, + TemporaryVariable.Options... options) { + return TemporaryVariable.create(scope, shape, dtype, options); } /** - * Applies sparse updates to a variable reference. - *

        - * This operation computes - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] = updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] = updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

        - * If values in `ref` is to be updated more than once, because there are - * duplicate entries in `indices`, the order at which the updates happen - * for each value is undefined. - *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

        - *

        - * - *
        + * An array of Tensors of given size. *

        - * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. + * Write data via Write and read via Read or Pack. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to store in `ref`. + * @param size The size of the array. + * @param dtype The type of the elements on the tensor_array. * @param options carries optional attributes values - * @return a new instance of ScatterUpdate + * @return a new instance of TensorArray */ - public ScatterUpdate scatterUpdate(Operand ref, - Operand indices, Operand updates, ScatterUpdate.Options... options) { - return ScatterUpdate.create(scope, ref, indices, updates, options); + public TensorArray tensorArray(Operand size, DataType dtype, + TensorArray.Options... options) { + return TensorArray.create(scope, size, dtype, options); } /** + * Delete the TensorArray from its resource container. + *

        + * This enables the user to close and release the resource in the middle + * of a step/run. * - * @param data type for {@code output()} output - * @param condition - * @param t - * @param e - * @return a new instance of Select + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @return a new instance of TensorArrayClose */ - public Select select(Operand condition, Operand t, Operand e) { - return Select.create(scope, condition, t, e); + public TensorArrayClose tensorArrayClose(Operand handle) { + return TensorArrayClose.create(scope, handle); } /** - * Computes the difference between two lists of numbers or strings. + * Concat the elements from the TensorArray into value `value`. *

        - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: + * Takes `T` elements of shapes *

        - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + *

        {@code
        +   *    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
        +   *    }
        + * and concatenates them into a Tensor of shape: *

        - * For example, given this input: - *

        {@code
        -   *  x = [1, 2, 3, 4, 5, 6]
        -   *  y = [1, 3, 5]
        -   *  }
        - * This operation would return: - *
        {@code
        -   *  out ==> [2, 4, 6]
        -   *  idx ==> [1, 3, 5]
        -   *  }
        + *
        {@code
        +   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
        + * All elements must have the same shape (excepting the first dimension). * - * @param data type for {@code out()} output - * @param data type for {@code idx()} output - * @param x 1-D. Values to keep. - * @param y 1-D. Values to remove. - * @return a new instance of SetDiff1d + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayConcat */ - public SetDiff1d setDiff1d(Operand x, Operand y) { - return SetDiff1d.create(scope, x, y); + public TensorArrayConcat tensorArrayConcat(Operand handle, + Operand flowIn, DataType dtype, TensorArrayConcat.Options... options) { + return TensorArrayConcat.create(scope, handle, flowIn, dtype, options); } /** - * Computes the difference between two lists of numbers or strings. - *

        - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: - *

        - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * Gather specific elements from the TensorArray into output `value`. *

        - * For example, given this input: - *

        {@code
        -   *  x = [1, 2, 3, 4, 5, 6]
        -   *  y = [1, 3, 5]
        -   *  }
        - * This operation would return: - *
        {@code
        -   *  out ==> [2, 4, 6]
        -   *  idx ==> [1, 3, 5]
        -   *  }
        + * All elements selected by `indices` must have the same shape. * - * @param data type for {@code out()} output - * @param data type for {@code idx()} output - * @param x 1-D. Values to keep. - * @param y 1-D. Values to remove. - * @param outIdx - * @return a new instance of SetDiff1d + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayGather */ - public SetDiff1d setDiff1d(Operand x, Operand y, - DataType outIdx) { - return SetDiff1d.create(scope, x, y, outIdx); + public TensorArrayGather tensorArrayGather(Operand handle, + Operand indices, Operand flowIn, DataType dtype, + TensorArrayGather.Options... options) { + return TensorArrayGather.create(scope, handle, indices, flowIn, dtype, options); } /** - * Number of unique elements along last dimension of input `set`. + * Creates a TensorArray for storing the gradients of values in the given handle. *

        - * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, - * and `set_shape`. The last dimension contains values in a set, duplicates are - * allowed but ignored. + * If the given TensorArray gradient already exists, returns a reference to it. *

        - * If `validate_indices` is `True`, this op validates the order and range of `set` - * indices. + * Locks the size of the original TensorArray by disabling its dynamic size flag. + *

        + * *A note about the input flow_in:** + *

        + * The handle flow_in forces the execution of the gradient lookup to occur + * only after certain other operations have occurred. For example, when + * the forward TensorArray is dynamically sized, writes to this TensorArray + * may resize the object. The gradient TensorArray is statically sized based + * on the size of the forward TensorArray when this operation executes. + * Furthermore, the size of the forward TensorArray is frozen by this call. + * As a result, the flow is used to ensure that the call to generate the gradient + * TensorArray only happens after all writes are executed. + *

        + * In the case of dynamically sized TensorArrays, gradient computation should + * only be performed on read operations that have themselves been chained via + * flow to occur only after all writes have executed. That way the final size + * of the forward TensorArray is known when this operation is called. + *

        + * *A note about the source attribute:** + *

        + * TensorArray gradient calls use an accumulator TensorArray object. If + * multiple gradients are calculated and run in the same session, the multiple + * gradient nodes may accidentally flow through the same accumulator TensorArray. + * This double counts and generally breaks the TensorArray gradient flow. + *

        + * The solution is to identify which gradient call this particular + * TensorArray gradient is being called in. This is performed by identifying + * a unique string (e.g. "gradients", "gradients_1", ...) from the input + * gradient Tensor's name. This string is used as a suffix when creating + * the TensorArray gradient object here (the attribute `source`). + *

        + * The attribute `source` is added as a suffix to the forward TensorArray's + * name when performing the creation / lookup, so that each separate gradient + * calculation gets its own TensorArray accumulator. * - * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. - * @param setValues 1D `Tensor`, values of a `SparseTensor`. - * @param setShape 1D `Tensor`, shape of a `SparseTensor`. - * @param options carries optional attributes values - * @return a new instance of SetSize + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGrad */ - public SetSize setSize(Operand setIndices, Operand setValues, - Operand setShape, SetSize.Options... options) { - return SetSize.create(scope, setIndices, setValues, setShape, options); + public TensorArrayGrad tensorArrayGrad(Operand handle, Operand flowIn, + String source) { + return TensorArrayGrad.create(scope, handle, flowIn, source); } /** - * Returns the shape of a tensor. - *

        - * This operation returns a 1-D integer tensor representing the shape of `input`. + * Creates a TensorArray for storing multiple gradients of values in the given handle. *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
        -   *  shape(t) ==> [2, 2, 3]
        -   *  }
        + * Similar to TensorArrayGradV3. However it creates an accumulator with an + * expanded shape compared to the input TensorArray whose gradient is being + * computed. This enables multiple gradients for the same TensorArray to be + * calculated using the same accumulator. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of Shape + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient accumulator will + * have shape which is this shape_to_prepend value concatenated with shape of the + * elements in the TensorArray corresponding to the input handle. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGradWithShape */ - public org.tensorflow.op.core.Shape shape(Operand input) { - return org.tensorflow.op.core.Shape.create(scope, input); + public TensorArrayGradWithShape tensorArrayGradWithShape(Operand handle, + Operand flowIn, Operand shapeToPrepend, String source) { + return TensorArrayGradWithShape.create(scope, handle, flowIn, shapeToPrepend, source); } /** - * Returns the shape of a tensor. - *

        - * This operation returns a 1-D integer tensor representing the shape of `input`. - *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
        -   *  shape(t) ==> [2, 2, 3]
        -   *  }
        * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of Shape + * @param data type for {@code value()} output + * @param handle + * @param flowIn + * @param dtype + * @param options carries optional attributes values + * @return a new instance of TensorArrayPack */ - public org.tensorflow.op.core.Shape shape( - Operand input, DataType outType) { - return org.tensorflow.op.core.Shape.create(scope, input, outType); + public TensorArrayPack tensorArrayPack(Operand handle, + Operand flowIn, DataType dtype, TensorArrayPack.Options... options) { + return TensorArrayPack.create(scope, handle, flowIn, dtype, options); } /** - * Returns shape of tensors. - *

        - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * Read an element from the TensorArray into output `value`. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of ShapeN + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param index + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @return a new instance of TensorArrayRead */ - public ShapeN shapeN(Iterable> input) { - return ShapeN.create(scope, input); + public TensorArrayRead tensorArrayRead(Operand handle, + Operand index, Operand flowIn, DataType dtype) { + return TensorArrayRead.create(scope, handle, index, flowIn, dtype); } /** - * Returns shape of tensors. + * Scatter the data from the input value into specific TensorArray elements. *

        - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * `indices` must be a vector, its length must match the first dim of `value`. * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of ShapeN + * @param handle The handle to a TensorArray. + * @param indices The locations at which to write the tensor elements. + * @param value The concatenated tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayScatter */ - public ShapeN shapeN(Iterable> input, - DataType outType) { - return ShapeN.create(scope, input, outType); + public TensorArrayScatter tensorArrayScatter(Operand handle, + Operand indices, Operand value, Operand flowIn) { + return TensorArrayScatter.create(scope, handle, indices, value, flowIn); } /** - * Returns the size of a tensor. - *

        - * This operation returns an integer representing the number of elements in - * `input`. - *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
        -   *  size(t) ==> 12
        -   *  }
        + * Get the current size of the TensorArray. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of Size + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySize */ - public Size size(Operand input) { - return Size.create(scope, input); + public TensorArraySize tensorArraySize(Operand handle, Operand flowIn) { + return TensorArraySize.create(scope, handle, flowIn); } /** - * Returns the size of a tensor. + * Split the data from the input value into TensorArray elements. *

        - * This operation returns an integer representing the number of elements in - * `input`. + * Assuming that `lengths` takes on values *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
        -   *  size(t) ==> 12
        -   *  }
        + *
        {@code
        +   *  (n0, n1, ..., n(T-1))}
        + * and that `value` has shape + *

        + *

        {@code
        +   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
        + * , + *

        + * this splits values into a TensorArray with T tensors. + *

        + * TensorArray index t will be the subtensor of values with starting position + *

        + *

        {@code
        +   *  (n0 + n1 + ... + n(t-1), 0, 0, ...)}
        + * and having size + *

        + *

        {@code
        +   *  nt x d0 x d1 x ...}
        * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of Size + * @param handle The handle to a TensorArray. + * @param value The concatenated tensor to write to the TensorArray. + * @param lengths The vector of lengths, how to split the rows of value into the + * TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySplit */ - public Size size(Operand input, DataType outType) { - return Size.create(scope, input, outType); + public TensorArraySplit tensorArraySplit(Operand handle, Operand value, + Operand lengths, Operand flowIn) { + return TensorArraySplit.create(scope, handle, value, lengths, flowIn); } /** - * Parses a text file and creates a batch of examples. * - * @param filename The corpus's text file name. - * @param batchSize The size of produced batch. - * @param options carries optional attributes values - * @return a new instance of Skipgram + * @param handle + * @param value + * @param flowIn + * @return a new instance of TensorArrayUnpack */ - public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... options) { - return Skipgram.create(scope, filename, batchSize, options); + public TensorArrayUnpack tensorArrayUnpack(Operand handle, + Operand value, Operand flowIn) { + return TensorArrayUnpack.create(scope, handle, value, flowIn); } /** - * Return a slice from 'input'. + * Push an element onto the tensor_array. + * + * @param handle The handle to a TensorArray. + * @param index The position to write to inside the TensorArray. + * @param value The tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. 
+ * @return a new instance of TensorArrayWrite + */ + public TensorArrayWrite tensorArrayWrite(Operand handle, + Operand index, Operand value, Operand flowIn) { + return TensorArrayWrite.create(scope, handle, index, value, flowIn); + } + + /** + * Concats all tensors in the list along the 0th dimension. *

        - * The output tensor is a tensor with dimensions described by 'size' - * whose values are extracted from 'input' starting at the offsets in - * 'begin'. + * Requires that all tensors have the same shape except the first dimension. *

        - * Requirements: - * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. + * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. + * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. * - * @param data type for {@code output()} output - * @param input - * @param begin begin[i] specifies the offset into the 'i'th dimension of - * 'input' to slice from. - * @param size size[i] specifies the number of elements of the 'i'th dimension - * of 'input' to slice. If size[i] is -1, all remaining elements in dimension - * i are included in the slice (i.e. this is equivalent to setting - * size[i] = input.dim_size(i) - begin[i]). - * @return a new instance of Slice + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param leadingDims + * @param elementDtype + * @return a new instance of TensorListConcat */ - public Slice slice(Operand input, Operand begin, - Operand size) { - return Slice.create(scope, input, begin, size); + public TensorListConcat tensorListConcat( + Operand inputHandle, Operand elementShape, Operand leadingDims, + DataType elementDtype) { + return TensorListConcat.create(scope, inputHandle, elementShape, leadingDims, elementDtype); } /** - * Returns a copy of the input tensor. 
* - * @param data type for {@code output()} output - * @param input - * @return a new instance of Snapshot + * @param inputA + * @param inputB + * @param elementDtype + * @return a new instance of TensorListConcatLists */ - public Snapshot snapshot(Operand input) { - return Snapshot.create(scope, input); + public TensorListConcatLists tensorListConcatLists(Operand inputA, + Operand inputB, DataType elementDtype) { + return TensorListConcatLists.create(scope, inputA, inputB, elementDtype); } /** - * SpaceToBatch for N-D tensors of type T. + * The shape of the elements of the given list, as a tensor. *

        - * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a - * grid of blocks of shape `block_shape`, and interleaves these blocks with the - * "batch" dimension (0) such that in the output, the spatial dimensions - * `[1, ..., M]` correspond to the position within the grid, and the batch - * dimension combines both the position within a spatial block and the original - * batch position. Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to `paddings`. See below for a - * precise description. + * input_handle: the list + * element_shape: the shape of elements of the list * - * @param data type for {@code output()} output - * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - * where spatial_shape has `M` dimensions. - * @param blockShape 1-D with shape `[M]`, all values must be >= 1. - * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. - * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension - * `i + 1`, which corresponds to spatial dimension `i`. It is required that - * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + * @param data type for {@code elementShape()} output + * @param inputHandle + * @param shapeType + * @return a new instance of TensorListElementShape + */ + public TensorListElementShape tensorListElementShape( + Operand inputHandle, DataType shapeType) { + return TensorListElementShape.create(scope, inputHandle, shapeType); + } + + /** + * Creates a TensorList which, when stacked, has the value of `tensor`. *

        - * This operation is equivalent to the following steps: + * Each tensor in the result list corresponds to one row of the input tensor. *

        - * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the - * input according to `paddings` to produce `padded` of shape `padded_shape`. + * tensor: The input tensor. + * output_handle: The list. + * + * @param tensor + * @param elementShape + * @return a new instance of TensorListFromTensor + */ + public TensorListFromTensor tensorListFromTensor( + Operand tensor, Operand elementShape) { + return TensorListFromTensor.create(scope, tensor, elementShape); + } + + /** + * Creates a Tensor by indexing into the TensorList. *

        - * 2. Reshape `padded` to `reshaped_padded` of shape: + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). *

        - * [batch] + - * [padded_shape[1] / block_shape[0], - * block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1], - * block_shape[M-1]] + - * remaining_shape + * input_handle: The input tensor list. + * indices: The indices used to index into the list. + * values: The tensor. + * + * @param data type for {@code values()} output + * @param inputHandle + * @param indices + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGather + */ + public TensorListGather tensorListGather(Operand inputHandle, + Operand indices, Operand elementShape, DataType elementDtype) { + return TensorListGather.create(scope, inputHandle, indices, elementShape, elementDtype); + } + + /** + * + * @param data type for {@code item()} output + * @param inputHandle + * @param index + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGetItem + */ + public TensorListGetItem tensorListGetItem(Operand inputHandle, + Operand index, Operand elementShape, DataType elementDtype) { + return TensorListGetItem.create(scope, inputHandle, index, elementShape, elementDtype); + } + + /** + * Returns the number of tensors in the input tensor list. *

        - * 3. Permute dimensions of `reshaped_padded` to produce - * `permuted_reshaped_padded` of shape: + * input_handle: the input list + * length: the number of tensors in the list + * + * @param inputHandle + * @return a new instance of TensorListLength + */ + public TensorListLength tensorListLength(Operand inputHandle) { + return TensorListLength.create(scope, inputHandle); + } + + /** + * Returns the last element of the input list as well as a list with all but that element. *

        - * block_shape + - * [batch] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape + * Fails if the list is empty. *

        - * 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch - * dimension, producing an output tensor of shape: + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListPopBack + */ + public TensorListPopBack tensorListPopBack(Operand inputHandle, + Operand elementShape, DataType elementDtype) { + return TensorListPopBack.create(scope, inputHandle, elementShape, elementDtype); + } + + /** + * Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. *

        - * [batch * prod(block_shape)] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape - *

        - * Some examples: - *

        - * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *

        {@code
        -   *  x = [[[[1], [2]], [[3], [4]]]]
        -   *  }
        - * The output tensor has shape `[4, 1, 1, 1]` and value: - *
        {@code
        -   *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
        -   *  }
        - * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *
        {@code
        -   *  x = [[[[1, 2, 3], [4, 5, 6]],
        -   *        [[7, 8, 9], [10, 11, 12]]]]
        -   *  }
        - * The output tensor has shape `[4, 1, 1, 3]` and value: - *
        {@code
        -   *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
        -   *  }
        - * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *
        {@code
        -   *  x = [[[[1],   [2],  [3],  [4]],
        -   *        [[5],   [6],  [7],  [8]],
        -   *        [[9],  [10], [11],  [12]],
        -   *        [[13], [14], [15],  [16]]]]
        -   *  }
        - * The output tensor has shape `[4, 2, 2, 1]` and value: - *
        {@code
        -   *  x = [[[[1], [3]], [[9], [11]]],
        -   *       [[[2], [4]], [[10], [12]]],
        -   *       [[[5], [7]], [[13], [15]]],
        -   *       [[[6], [8]], [[14], [16]]]]
        -   *  }
        - * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - * paddings = `[[0, 0], [2, 0]]`: - *
        {@code
        -   *  x = [[[[1],   [2],  [3],  [4]],
        -   *        [[5],   [6],  [7],  [8]]],
        -   *       [[[9],  [10], [11],  [12]],
        -   *        [[13], [14], [15],  [16]]]]
        -   *  }
        - * The output tensor has shape `[8, 1, 3, 1]` and value: - *
        {@code
        -   *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
        -   *       [[[0], [2], [4]]], [[[0], [10], [12]]],
        -   *       [[[0], [5], [7]]], [[[0], [13], [15]]],
        -   *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
        -   *  }
        - * Among others, this operation is useful for reducing atrous convolution into - * regular convolution. - * @return a new instance of SpaceToBatchNd + * tensor: The tensor to put on the list. + * input_handle: The old list. + * output_handle: A list with the elements of the old list followed by tensor. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param inputHandle + * @param tensor + * @return a new instance of TensorListPushBack */ - public SpaceToBatchNd spaceToBatchNd( - Operand input, Operand blockShape, Operand paddings) { - return SpaceToBatchNd.create(scope, input, blockShape, paddings); + public TensorListPushBack tensorListPushBack(Operand inputHandle, + Operand tensor) { + return TensorListPushBack.create(scope, inputHandle, tensor); } /** - * Splits a tensor into `num_split` tensors along one dimension. * - * @param data type for {@code output()} output - * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. - * @param value The tensor to split. - * @param numSplit The number of ways to split. Must evenly divide - * `value.shape[split_dim]`. - * @return a new instance of Split + * @param inputHandles + * @param tensor + * @return a new instance of TensorListPushBackBatch */ - public Split split(Operand axis, Operand value, Long numSplit) { - return Split.create(scope, axis, value, numSplit); + public TensorListPushBackBatch tensorListPushBackBatch(Operand inputHandles, + Operand tensor) { + return TensorListPushBackBatch.create(scope, inputHandles, tensor); } /** - * Splits a tensor into `num_split` tensors along one dimension. + * List of the given size with empty elements. + *

        + * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. * - * @param data type for {@code output()} output - * @param value The tensor to split. - * @param sizeSplits list containing the sizes of each output tensor along the split - * dimension. Must sum to the dimension of value along split_dim. - * Can contain one -1 indicating that dimension is to be inferred. - * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. - * @param numSplit - * @return a new instance of SplitV + * @param elementShape + * @param numElements + * @param elementDtype + * @return a new instance of TensorListReserve */ - public SplitV splitV(Operand value, - Operand sizeSplits, Operand axis, Long numSplit) { - return SplitV.create(scope, value, sizeSplits, axis, numSplit); + public TensorListReserve tensorListReserve( + Operand elementShape, Operand numElements, DataType elementDtype) { + return TensorListReserve.create(scope, elementShape, numElements, elementDtype); } /** - * Removes dimensions of size 1 from the shape of a tensor. - *

        - * Given a tensor `input`, this operation returns a tensor of the same type with - * all dimensions of size 1 removed. If you don't want to remove all size 1 - * dimensions, you can remove specific size 1 dimensions by specifying - * `axis`. + * Resizes the list. *

        - * For example: - *

        {@code
        -   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
        -   *  shape(squeeze(t)) ==> [2, 3]
        -   *  }
        - * Or, to remove specific size 1 dimensions: - *
        {@code
        -   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
        -   *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
        -   *  }
        * - * @param data type for {@code output()} output - * @param input The `input` to squeeze. - * @param options carries optional attributes values - * @return a new instance of Squeeze + * input_handle: the input list + * size: size of the output list + * + * @param inputHandle + * @param size + * @return a new instance of TensorListResize */ - public Squeeze squeeze(Operand input, Squeeze.Options... options) { - return Squeeze.create(scope, input, options); + public TensorListResize tensorListResize(Operand inputHandle, Operand size) { + return TensorListResize.create(scope, inputHandle, size); } /** - * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - *

        - * Packs the `N` tensors in `values` into a tensor with rank one higher than each - * tensor in `values`, by packing them along the `axis` dimension. - * Given a list of tensors of shape `(A, B, C)`; + * Creates a TensorList by indexing into a Tensor. *

        - * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. - * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. - * Etc. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). *

        - * For example: - *

        {@code
        -   *  # 'x' is [1, 4]
        -   *  # 'y' is [2, 5]
        -   *  # 'z' is [3, 6]
        -   *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
        -   *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
        -   *  }
        - * This is the opposite of `unpack`. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * element_shape: The shape of the elements in the list (can be less specified than + * the shape of the tensor). + * num_elements: The size of the output list. Must be large enough to accommodate + * the largest index in indices. If -1, the list is just large enough to include + * the largest index in indices. + * output_handle: The TensorList. * - * @param data type for {@code output()} output - * @param values Must be of same shape and type. - * @param options carries optional attributes values - * @return a new instance of Stack + * @param tensor + * @param indices + * @param elementShape + * @param numElements + * @return a new instance of TensorListScatter */ - public Stack stack(Iterable> values, Stack.Options... options) { - return Stack.create(scope, values, options); + public TensorListScatter tensorListScatter(Operand tensor, + Operand indices, Operand elementShape, Operand numElements) { + return TensorListScatter.create(scope, tensor, indices, elementShape, numElements); } /** - * Stage values similar to a lightweight Enqueue. + * Scatters tensor at indices in an input list. *

        - * The basic functionality of this Op is similar to a queue with many - * fewer capabilities and options. This Op is optimized for performance. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + *

        + * input_handle: The list to scatter into. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * output_handle: The TensorList. * - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param options carries optional attributes values - * @return a new instance of Stage + * @param inputHandle + * @param tensor + * @param indices + * @return a new instance of TensorListScatterIntoExistingList */ - public Stage stage(Iterable> values, Stage.Options... options) { - return Stage.create(scope, values, options); + public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( + Operand inputHandle, Operand tensor, Operand indices) { + return TensorListScatterIntoExistingList.create(scope, inputHandle, tensor, indices); } /** - * Op removes all elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StageClear + * @param inputHandle + * @param index + * @param item + * @return a new instance of TensorListSetItem */ - public StageClear stageClear(List> dtypes, StageClear.Options... options) { - return StageClear.create(scope, dtypes, options); + public TensorListSetItem tensorListSetItem(Operand inputHandle, + Operand index, Operand item) { + return TensorListSetItem.create(scope, inputHandle, index, item); } /** - * Op peeks at the values at the specified index. If the + * Splits a tensor into a list. *

        - * underlying container does not contain sufficient elements - * this op will block until it does. This Op is optimized for - * performance. + * list[i] corresponds to lengths[i] tensors from the input tensor. + * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + *

        + * tensor: The input tensor. + * element_shape: A shape compatible with that of elements in the tensor. + * lengths: Vector of sizes of the 0th dimension of tensors in the list. + * output_handle: The list. * - * @param index - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StagePeek + * @param tensor + * @param elementShape + * @param lengths + * @return a new instance of TensorListSplit */ - public StagePeek stagePeek(Operand index, List> dtypes, - StagePeek.Options... options) { - return StagePeek.create(scope, index, dtypes, options); + public TensorListSplit tensorListSplit(Operand tensor, + Operand elementShape, Operand lengths) { + return TensorListSplit.create(scope, tensor, elementShape, lengths); } /** - * Op returns the number of elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StageSize - */ - public StageSize stageSize(List> dtypes, StageSize.Options... options) { - return StageSize.create(scope, dtypes, options); - } - - /** - * Stops gradient computation. - *

        - * When executed in a graph, this op outputs its input tensor as-is. + * Stacks all tensors in the list. *

        - * When building ops to compute gradients, this op prevents the contribution of - * its inputs to be taken into account. Normally, the gradient generator adds ops - * to a graph to compute the derivatives of a specified 'loss' by recursively - * finding out inputs that contributed to its computation. If you insert this op - * in the graph it inputs are masked from the gradient generator. They are not - * taken into account for computing gradients. + * Requires that all tensors have the same shape. *

        - * This is useful any time you want to compute a value with TensorFlow but need - * to pretend that the value was a constant. Some examples include: - *

          - *
        • - * The EM algorithm where the M-step should not involve backpropagation - * through the output of the E-step. - *
        • - *
        • - * Contrastive divergence training of Boltzmann machines where, when - * differentiating the energy function, the training must not backpropagate - * through the graph that generated the samples from the model. - *
        • - *
        • - * Adversarial training, where no backprop should happen through the adversarial - * example generation process. + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of StopGradient + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @param options carries optional attributes values + * @return a new instance of TensorListStack */ - public StopGradient stopGradient(Operand input) { - return StopGradient.create(scope, input); + public TensorListStack tensorListStack(Operand inputHandle, + Operand elementShape, DataType elementDtype, TensorListStack.Options... options) { + return TensorListStack.create(scope, inputHandle, elementShape, elementDtype, options); } /** - * Return a strided slice from `input`. + * Adds sparse `updates` to an existing tensor according to `indices`. *

          - * Note, most python users will want to use the Python `Tensor.__getitem__` - * or `Variable.__getitem__` rather than this op directly. + * This operation creates a new tensor by adding sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd_add`, except that the updates + * are added onto an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * The goal of this op is to produce a new tensor with a subset of - * the elements from the `n` dimensional `input` tensor. The subset is chosen using - * a sequence of `m` sparse range specifications encoded into the arguments - * of this function. Note, in some cases - * `m` could be equal to `n`, but this need not be the case. Each - * range specification entry can be one of the following: + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: *

          - * - An ellipsis (...). Ellipses are used to imply zero or more - * dimensions of full-dimension selection and are produced using - * `ellipsis_mask`. For example, `foo[...]` is the identity slice. + * indices.shape[-1] <= shape.rank *

          - * - A new axis. This is used to insert a new shape=1 dimension and is - * produced using `new_axis_mask`. For example, `foo[:, ...]` where - * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape *

          - * - A range `begin:end:stride`. This is used to specify how much to choose from - * a given dimension. `stride` can be any integer but 0. `begin` is an integer - * which represents the index of the first value to select while `end` represents - * the index of the last value to select. The number of values selected in each - * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. - * `begin` and `end` can be negative where `-1` is the last element, `-2` is - * the second to last. `begin_mask` controls whether to replace the explicitly - * given `begin` with an implicit effective value of `0` if `stride > 0` and - * `-1` if `stride < 0`. `end_mask` is analogous but produces the number - * required to create the largest open interval. For example, given a shape - * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do - * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` - * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the - * first dimension of a tensor while dropping the last two (in the original - * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. + * indices.shape[:-1] + shape[indices.shape[-1]:] *

          - * - A single index. This is used to keep only elements that have a given - * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a - * shape `(6,)` tensor. This is encoded in `begin` and `end` and - * `shrink_axis_mask`. + * The simplest form of tensor_scatter_add is to add individual elements to a + * tensor by index. For example, say we want to add 4 elements in a rank-1 + * tensor with 8 elements. *

          - * Each conceptual range specification is encoded in the op's argument. This - * encoding is best understand by considering a non-trivial example. In - * particular, - * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + * In Python, this scatter add operation would look like this: *

          {@code
          -   *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
          -   *  end = [2, 4, x, x, -3, x]
          -   *  strides = [1, 1, x, x, -1, 1]
          -   *  begin_mask = 1<<4 | 1 << 5 = 48
          -   *  end_mask = 1<<5 = 32
          -   *  ellipsis_mask = 1<<3 = 8
          -   *  new_axis_mask = 1<<2 4
          -   *  shrink_axis_mask = 1<<0
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          +   *      print(updated)
              *  }
          - * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of - * the slice becomes (2, 1, 5, 5, 2, 5). - * Let us walk step by step through each argument specification. - *

          - * 1. The first argument in the example slice is turned into `begin = 1` and - * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we - * also set the appropriate bit in `shrink_axis_mask`. - *

          - * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have - * zero bits contributed. + * The resulting tensor would look like this: *

          - * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 - * dimension in the final shape. Dummy values are contributed to begin, - * end and stride, while the new_axis_mask bit is set. + * [1, 12, 1, 11, 10, 1, 1, 13] *

          - * 4. `...` grab the full ranges from as many dimensions as needed to - * fully specify a slice for every dimension of the input shape. + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. *

          - * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated - * with a dimension that has shape `s` is converted to a positive index - * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion - * is done internally so begin, end and strides receive x, -3, and -1. - * The appropriate begin_mask bit is set to indicate the start range is the - * full range (ignoring the x). + * In Python, this scatter add operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: *

          - * 6. `:` indicates that the entire contents of the corresponding dimension - * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides - * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and - * `end_mask` are also set. + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] *

          - * Requirements: - * `0 != strides[i] for i in [0, m)` - * `ellipsis_mask must be a power of two (only one ellipsis)` + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for {@code output()} output - * @param input - * @param begin `begin[k]` specifies the offset into the `k`th range specification. - * The exact dimension this corresponds to will be determined by context. - * Out-of-bounds values will be silently clamped. If the `k`th bit of - * `begin_mask` then `begin[k]` is ignored and the full range of the - * appropriate dimension is used instead. Negative values causes indexing - * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. - * @param end `end[i]` is like `begin` with the exception that `end_mask` is - * used to determine full ranges. - * @param strides `strides[i]` specifies the increment in the `i`th specification - * after extracting a given element. Negative indices will reverse - * the original order. Out or range values are - * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` - * @param options carries optional attributes values - * @return a new instance of StridedSlice + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdAdd */ - public StridedSlice stridedSlice(Operand input, - Operand begin, Operand end, Operand strides, StridedSlice.Options... options) { - return StridedSlice.create(scope, input, begin, end, strides, options); + public TensorScatterNdAdd tensorScatterNdAdd( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdAdd.create(scope, tensor, indices, updates); } /** - * Assign `value` to the sliced l-value reference of `ref`. + * Subtracts sparse `updates` from an existing tensor according to `indices`. *

          - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * This operation creates a new tensor by subtracting sparse `updates` from the + * passed in `tensor`. + * This operation is very similar to `tf.scatter_nd_sub`, except that the updates + * are subtracted from an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. - * - * @param data type for {@code outputRef()} output - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of StridedSliceAssign - */ - public StridedSliceAssign stridedSliceAssign( - Operand ref, Operand begin, Operand end, Operand strides, Operand value, - StridedSliceAssign.Options... options) { - return StridedSliceAssign.create(scope, ref, begin, end, strides, value, options); - } - - /** - * Returns the gradient of `StridedSlice`. + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: *

          - * Since `StridedSlice` cuts out pieces of its `input` which is size - * `shape`, its gradient will have the same shape (which is passed here - * as `shape`). The gradient will be zero in any element that the slice - * does not select. + * indices.shape[-1] <= shape.rank *

          - * Arguments are the same as StridedSliceGrad with the exception that - * `dy` is the input gradient to be propagated and `shape` is the - * shape of `StridedSlice`'s `input`. - * - * @param data type for {@code output()} output - * @param shape - * @param begin - * @param end - * @param strides - * @param dy - * @param options carries optional attributes values - * @return a new instance of StridedSliceGrad - */ - public StridedSliceGrad stridedSliceGrad(Operand shape, - Operand begin, Operand end, Operand strides, Operand dy, - StridedSliceGrad.Options... options) { - return StridedSliceGrad.create(scope, shape, begin, end, strides, dy, options); - } - - /** - * Computes the sum of elements across dimensions of a tensor. + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape *

          - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

          + * The simplest form of tensor_scatter_sub is to subtract individual elements + * from a tensor by index. For example, say we want to insert 4 scattered elements + * in a rank-1 tensor with 8 elements. + *

          + * In Python, this scatter subtract operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [1, -10, 1, -9, -8, 1, 1, -11] + *

          + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

          + * In Python, this scatter add operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + *

          + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Sum + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdSub */ - public Sum sum(Operand input, Operand axis, - Sum.Options... options) { - return Sum.create(scope, input, axis, options); + public TensorScatterNdSub tensorScatterNdSub( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdSub.create(scope, tensor, indices, updates); } /** - * Forwards `data` to the output port determined by `pred`. + * Scatter `updates` into an existing tensor according to `indices`. *

          - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * This operation creates a new tensor by applying sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd`, except that the updates are + * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * See also `RefSwitch` and `Merge`. + * If `indices` contains duplicates, then their updates are accumulated (summed). + *

          + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + *

          + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + *

          + * indices.shape[-1] <= shape.rank + *

          + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + *

          + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

          + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + *

          + *

          + * + *
          + *

          + * In Python, this scatter operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [1, 11, 1, 10, 9, 1, 1, 12] + *

          + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

          + * In Python, this scatter operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + *

          + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code outputFalse()} output - * @param data The tensor to be forwarded to the appropriate output. - * @param pred A scalar that specifies which output port will receive data. - * @return a new instance of SwitchCond + * @param data type for {@code output()} output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdUpdate */ - public SwitchCond switchCond(Operand data, Operand pred) { - return SwitchCond.create(scope, data, pred); + public TensorScatterNdUpdate tensorScatterNdUpdate( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdUpdate.create(scope, tensor, indices, updates); } /** - * Returns a tensor that may be mutated, but only persists within a single step. - *

          - * This is an experimental op for internal use only and it is possible to use this - * op in unsafe ways. DO NOT USE unless you fully understand the risks. - *

          - * It is the caller's responsibility to ensure that 'ref' is eventually passed to a - * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * Assign `value` to the sliced l-value reference of `input`. *

          - * Outputs a ref to the tensor state so it may be read or modified. + * The values of `value` are assigned to the positions in the tensor `input` that + * are selected by the slice parameters. The slice parameters `begin` `end` + * `strides` etc. work exactly as in `StridedSlice`. *

          - * E.g. - * var = state_ops._temporary_variable([1, 2], types.float_) - * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) - * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * NOTE this op currently does not support broadcasting and so `value`'s shape + * must be exactly the shape produced by the slice of `input`. * - * @param data type for {@code ref()} output - * @param shape The shape of the variable tensor. - * @param dtype The type of elements in the variable tensor. + * @param data type for {@code output()} output + * @param input + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of TemporaryVariable + * @return a new instance of TensorStridedSliceUpdate */ - public TemporaryVariable temporaryVariable(Shape shape, DataType dtype, - TemporaryVariable.Options... options) { - return TemporaryVariable.create(scope, shape, dtype, options); + public TensorStridedSliceUpdate tensorStridedSliceUpdate( + Operand input, Operand begin, Operand end, Operand strides, Operand value, + TensorStridedSliceUpdate.Options... options) { + return TensorStridedSliceUpdate.create(scope, input, begin, end, strides, value, options); } /** - * An array of Tensors of given size. + * Constructs a tensor by tiling a given tensor. *

          - * Write data via Write and read via Read or Pack. + * This operation creates a new tensor by replicating `input` `multiples` times. + * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + * and the values of `input` are replicated `multiples[i]` times along the 'i'th + * dimension. For example, tiling `[a b c d]` by `[2]` produces + * `[a b c d a b c d]`. + *

          + * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * >>> b = tf.constant([1,2], tf.int32) + * >>> tf.tile(a, b) + * + * >>> c = tf.constant([2,1], tf.int32) + * >>> tf.tile(a, c) + * + * >>> d = tf.constant([2,2], tf.int32) + * >>> tf.tile(a, d) + * * - * @param size The size of the array. - * @param dtype The type of the elements on the tensor_array. - * @param options carries optional attributes values - * @return a new instance of TensorArray + * @param data type for {@code output()} output + * @param input 1-D or higher. + * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @return a new instance of Tile */ - public TensorArray tensorArray(Operand size, DataType dtype, - TensorArray.Options... options) { - return TensorArray.create(scope, size, dtype, options); + public Tile tile(Operand input, Operand multiples) { + return Tile.create(scope, input, multiples); } /** - * Delete the TensorArray from its resource container. + * Provides the time since epoch in seconds. *

          - * This enables the user to close and release the resource in the middle - * of a step/run. + * Returns the timestamp as a `float64` for seconds since the Unix epoch. + *

          + * Note: the timestamp is computed when the op is executed, not when it is added + * to the graph. * - * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - * @return a new instance of TensorArrayClose + * @return a new instance of Timestamp */ - public TensorArrayClose tensorArrayClose(Operand handle) { - return TensorArrayClose.create(scope, handle); + public Timestamp timestamp() { + return Timestamp.create(scope); } /** - * Concat the elements from the TensorArray into value `value`. + * Perform batches of RPC requests. *

          - * Takes `T` elements of shapes + * This op asynchronously performs either a single RPC request, or a batch + * of requests. RPC requests are defined by three main parameters: *

          - *

          {@code
          -   *    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
          -   *    }
          - * and concatenates them into a Tensor of shape: + * - `address` (the host+port or BNS address of the request) + * - `method` (the method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
          - * All elements must have the same shape (excepting the first dimension). + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: + *
          {@code
          +   *  service MyService {
          +   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
          +   *    }
          +   *  };
          +   *  }
          + * then call this op with arguments: + *
          {@code
          +   *  address = "localhost:2345"
          +   *  method = "MyService/MyMethod"
          +   *  }
          + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. + *

          + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + *

          + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + *

          + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + *

          + * Unlike the standard `Rpc` op, if the connection fails or the remote worker + * returns an error status, this op does not reraise the exception. + * Instead, the `status_code` and `status_message` entry for the corresponding RPC + * call is set with the error returned from the RPC call. The `response` tensor + * will contain valid response values for those minibatch entries whose RPCs did + * not fail; the rest of the entries will have empty strings. * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. * @param options carries optional attributes values - * @return a new instance of TensorArrayConcat + * @return a new instance of TryRpc */ - public TensorArrayConcat tensorArrayConcat(Operand handle, - Operand flowIn, DataType dtype, TensorArrayConcat.Options... options) { - return TensorArrayConcat.create(scope, handle, flowIn, dtype, options); + public TryRpc tryRpc(Operand address, Operand method, Operand request, + TryRpc.Options... options) { + return TryRpc.create(scope, address, method, request, options); } /** - * Gather specific elements from the TensorArray into output `value`. 
+ * Reverses the operation of Batch for a single output Tensor. *

          - * All elements selected by `indices` must have the same shape. + * An instance of Unbatch either receives an empty batched_tensor, in which case it + * asynchronously waits until the values become available from a concurrently + * running instance of Unbatch with the same container and shared_name, or receives + * a non-empty batched_tensor in which case it finalizes all other concurrently + * running instances and outputs its own element from the batch. + *

          + * batched_tensor: The possibly transformed output of Batch. The size of the first + * dimension should remain unchanged by the transformations for the operation to + * work. + * batch_index: The matching batch_index obtained from Batch. + * id: The id scalar emitted by Batch. + * unbatched_tensor: The Tensor corresponding to this execution. + * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + * batched input tensor associated with a given invocation of the op. + * container: Container to control resource sharing. + * shared_name: Instances of Unbatch with the same container and shared_name are + * assumed to possibly belong to the same batch. If left empty, the op name will + * be used as the shared name. * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param indices The locations in the TensorArray from which to read tensor elements. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. + * @param data type for {@code unbatchedTensor()} output + * @param batchedTensor + * @param batchIndex + * @param id + * @param timeoutMicros * @param options carries optional attributes values - * @return a new instance of TensorArrayGather + * @return a new instance of Unbatch */ - public TensorArrayGather tensorArrayGather(Operand handle, - Operand indices, Operand flowIn, DataType dtype, - TensorArrayGather.Options... options) { - return TensorArrayGather.create(scope, handle, indices, flowIn, dtype, options); + public Unbatch unbatch(Operand batchedTensor, Operand batchIndex, + Operand id, Long timeoutMicros, Unbatch.Options... options) { + return Unbatch.create(scope, batchedTensor, batchIndex, id, timeoutMicros, options); } /** - * Creates a TensorArray for storing the gradients of values in the given handle. - *

          - * If the given TensorArray gradient already exists, returns a reference to it. - *

          - * Locks the size of the original TensorArray by disabling its dynamic size flag. - *

          - * *A note about the input flow_in:** - *

          - * The handle flow_in forces the execution of the gradient lookup to occur - * only after certain other operations have occurred. For example, when - * the forward TensorArray is dynamically sized, writes to this TensorArray - * may resize the object. The gradient TensorArray is statically sized based - * on the size of the forward TensorArray when this operation executes. - * Furthermore, the size of the forward TensorArray is frozen by this call. - * As a result, the flow is used to ensure that the call to generate the gradient - * TensorArray only happens after all writes are executed. - *

          - * In the case of dynamically sized TensorArrays, gradient computation should - * only be performed on read operations that have themselves been chained via - * flow to occur only after all writes have executed. That way the final size - * of the forward TensorArray is known when this operation is called. - *

          - * *A note about the source attribute:** - *

          - * TensorArray gradient calls use an accumulator TensorArray object. If - * multiple gradients are calculated and run in the same session, the multiple - * gradient nodes may accidentally flow through the same accumulator TensorArray. - * This double counts and generally breaks the TensorArray gradient flow. + * Gradient of Unbatch. *

          - * The solution is to identify which gradient call this particular - * TensorArray gradient is being called in. This is performed by identifying - * a unique string (e.g. "gradients", "gradients_1", ...) from the input - * gradient Tensor's name. This string is used as a suffix when creating - * the TensorArray gradient object here (the attribute `source`). + * Acts like Batch but using the given batch_index index of batching things as they + * become available. This ensures that the gradients are propagated back in the + * same session which did the forward pass. *

          - * The attribute `source` is added as a suffix to the forward TensorArray's - * name when performing the creation / lookup, so that each separate gradient - * calculation gets its own TensorArray accumulator. + * original_input: The input to the Unbatch operation this is the gradient of. + * batch_index: The batch_index given to the Unbatch operation this is the gradient + * of. + * grad: The downstream gradient. + * id: The id scalar emitted by Batch. + * batched_grad: The return value, either an empty tensor or the batched gradient. + * container: Container to control resource sharing. + * shared_name: Instances of UnbatchGrad with the same container and shared_name + * are assumed to possibly belong to the same batch. If left empty, the op name + * will be used as the shared name. * - * @param handle The handle to the forward TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. - * @return a new instance of TensorArrayGrad + * @param data type for {@code batchedGrad()} output + * @param originalInput + * @param batchIndex + * @param grad + * @param id + * @param options carries optional attributes values + * @return a new instance of UnbatchGrad */ - public TensorArrayGrad tensorArrayGrad(Operand handle, Operand flowIn, - String source) { - return TensorArrayGrad.create(scope, handle, flowIn, source); + public UnbatchGrad unbatchGrad(Operand originalInput, + Operand batchIndex, Operand grad, Operand id, + UnbatchGrad.Options... options) { + return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); } /** - * Creates a TensorArray for storing multiple gradients of values in the given handle. + * Finds unique elements along an axis of a tensor. *

          - * Similar to TensorArrayGradV3. However it creates an accumulator with an - * expanded shape compared to the input TensorArray whose gradient is being - * computed. This enables multiple gradients for the same TensorArray to be - * calculated using the same accumulator. - * - * @param handle The handle to the forward TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient accumulator will - * have shape which is this shape_to_prepend value concatenated with shape of the - * elements in the TensorArray corresponding to the input handle. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. - * @return a new instance of TensorArrayGradWithShape - */ - public TensorArrayGradWithShape tensorArrayGradWithShape(Operand handle, - Operand flowIn, Operand shapeToPrepend, String source) { - return TensorArrayGradWithShape.create(scope, handle, flowIn, shapeToPrepend, source); - } - - /** + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx = unique(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  }
          * - * @param data type for {@code value()} output - * @param handle - * @param flowIn - * @param dtype - * @param options carries optional attributes values - * @return a new instance of TensorArrayPack + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of Unique */ - public TensorArrayPack tensorArrayPack(Operand handle, - Operand flowIn, DataType dtype, TensorArrayPack.Options... options) { - return TensorArrayPack.create(scope, handle, flowIn, dtype, options); + public Unique unique(Operand x, + Operand axis) { + return Unique.create(scope, x, axis); } /** - * Read an element from the TensorArray into output `value`. - * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param index - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. - * @return a new instance of TensorArrayRead - */ - public TensorArrayRead tensorArrayRead(Operand handle, - Operand index, Operand flowIn, DataType dtype) { - return TensorArrayRead.create(scope, handle, index, flowIn, dtype); - } - - /** - * Scatter the data from the input value into specific TensorArray elements. + * Finds unique elements along an axis of a tensor. *

          - * `indices` must be a vector, its length must match the first dim of `value`. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx = unique(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param indices The locations at which to write the tensor elements. - * @param value The concatenated tensor to write to the TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArrayScatter + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of Unique */ - public TensorArrayScatter tensorArrayScatter(Operand handle, - Operand indices, Operand value, Operand flowIn) { - return TensorArrayScatter.create(scope, handle, indices, value, flowIn); + public Unique unique(Operand x, + Operand axis, DataType outIdx) { + return Unique.create(scope, x, axis, outIdx); } /** - * Get the current size of the TensorArray. + * Finds unique elements along an axis of a tensor. + *

          + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx, count = unique_with_counts(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  count ==> [2, 1, 3, 1, 2]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  count ==> [2, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  count ==> [1, 2]
          +   *  }
          * - * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArraySize + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of UniqueWithCounts */ - public TensorArraySize tensorArraySize(Operand handle, Operand flowIn) { - return TensorArraySize.create(scope, handle, flowIn); + public UniqueWithCounts uniqueWithCounts( + Operand x, Operand axis) { + return UniqueWithCounts.create(scope, x, axis); } /** - * Split the data from the input value into TensorArray elements. - *

          - * Assuming that `lengths` takes on values - *

          - *

          {@code
          -   *  (n0, n1, ..., n(T-1))}
          - * and that `value` has shape - *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
          - * , - *

          - * this splits values into a TensorArray with T tensors. + * Finds unique elements along an axis of a tensor. *

          - * TensorArray index t will be the subtensor of values with starting position + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(t-1), 0, 0, ...)}
          - * and having size + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` *

          - *

          {@code
          -   *  nt x d0 x d1 x ...}
          + * For example: + *
          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx, count = unique_with_counts(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  count ==> [2, 1, 3, 1, 2]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  count ==> [2, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  count ==> [1, 2]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param value The concatenated tensor to write to the TensorArray. - * @param lengths The vector of lengths, how to split the rows of value into the - * TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArraySplit + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of UniqueWithCounts */ - public TensorArraySplit tensorArraySplit(Operand handle, Operand value, - Operand lengths, Operand flowIn) { - return TensorArraySplit.create(scope, handle, value, lengths, flowIn); + public UniqueWithCounts uniqueWithCounts( + Operand x, Operand axis, DataType outIdx) { + return UniqueWithCounts.create(scope, x, axis, outIdx); } /** + * Converts an array of flat indices into a tuple of coordinate arrays. + *

          * - * @param handle - * @param value - * @param flowIn - * @return a new instance of TensorArrayUnpack - */ - public TensorArrayUnpack tensorArrayUnpack(Operand handle, - Operand value, Operand flowIn) { - return TensorArrayUnpack.create(scope, handle, value, flowIn); - } - - /** - * Push an element onto the tensor_array. + * Example: + *

          {@code
          +   *  y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
          +   *  # 'dims' represent a hypothetical (3, 3) tensor of indices:
          +   *  # [[0, 1, *2*],
          +   *  #  [3, 4, *5*],
          +   *  #  [6, *7*, 8]]
          +   *  # For each entry from 'indices', this operation returns
          +   *  # its coordinates (marked with '*'), such as
          +   *  # 2 ==> (0, 2)
          +   *  # 5 ==> (1, 2)
          +   *  # 7 ==> (2, 1)
          +   *  y ==> [[0, 1, 2], [2, 2, 1]]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param index The position to write to inside the TensorArray. - * @param value The tensor to write to the TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArrayWrite + * @compatibility(numpy) Equivalent to np.unravel_index + * @end_compatibility + * @param data type for {@code output()} output + * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the + * flattened version of an array of dimensions dims. + * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling + * indices. + * @return a new instance of UnravelIndex */ - public TensorArrayWrite tensorArrayWrite(Operand handle, - Operand index, Operand value, Operand flowIn) { - return TensorArrayWrite.create(scope, handle, index, value, flowIn); + public UnravelIndex unravelIndex(Operand indices, Operand dims) { + return UnravelIndex.create(scope, indices, dims); } /** - * Concats all tensors in the list along the 0th dimension. + * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. *

          - * Requires that all tensors have the same shape except the first dimension. + * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + * For example, given a tensor of shape `(A, B, C, D)`; *

          - * input_handle: The input list. - * element_shape: The shape of the uninitialized elements in the list. If the first - * dimension is not -1, it is assumed that all list elements have the same - * leading dim. - * leading_dims: The list of leading dims of uninitialized list elements. Used if - * the leading dim of input_handle.element_shape or the element_shape input arg - * is not already set. - * tensor: The concated result. - * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + * and each tensor in `output` will have shape `(B, C, D)`. (Note that the + * dimension unpacked along is gone, unlike `split`). + *

          + * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + * and each tensor in `output` will have shape `(A, C, D)`. + * Etc. + *

          + * This is the opposite of `pack`. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param leadingDims - * @param elementDtype - * @return a new instance of TensorListConcat + * @param data type for {@code output()} output + * @param value 1-D or higher, with `axis` dimension size equal to `num`. + * @param num + * @param options carries optional attributes values + * @return a new instance of Unstack */ - public TensorListConcat tensorListConcat( - Operand inputHandle, Operand elementShape, Operand leadingDims, - DataType elementDtype) { - return TensorListConcat.create(scope, inputHandle, elementShape, leadingDims, elementDtype); + public Unstack unstack(Operand value, Long num, + Unstack.Options... options) { + return Unstack.create(scope, value, num, options); } /** + * Op is similar to a lightweight Dequeue. + *

          + * The basic functionality is similar to dequeue with many fewer + * capabilities and options. This Op is optimized for performance. * - * @param inputA - * @param inputB - * @param elementDtype - * @return a new instance of TensorListConcatLists + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of Unstage */ - public TensorListConcatLists tensorListConcatLists(Operand inputA, - Operand inputB, DataType elementDtype) { - return TensorListConcatLists.create(scope, inputA, inputB, elementDtype); + public Unstage unstage(List> dtypes, Unstage.Options... options) { + return Unstage.create(scope, dtypes, options); } /** - * The shape of the elements of the given list, as a tensor. - *

          - * input_handle: the list - * element_shape: the shape of elements of the list + * Creates a rank-1 constant of {@code long} elements. * - * @param data type for {@code elementShape()} output - * @param inputHandle - * @param shapeType - * @return a new instance of TensorListElementShape + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorListElementShape tensorListElementShape( - Operand inputHandle, DataType shapeType) { - return TensorListElementShape.create(scope, inputHandle, shapeType); + public Constant val(long[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a TensorList which, when stacked, has the value of `tensor`. - *

          - * Each tensor in the result list corresponds to one row of the input tensor. - *

          - * tensor: The input tensor. - * output_handle: The list. + * Creates a rank-6 constant of {@code double} elements. * - * @param tensor - * @param elementShape - * @return a new instance of TensorListFromTensor + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public TensorListFromTensor tensorListFromTensor( - Operand tensor, Operand elementShape) { - return TensorListFromTensor.create(scope, tensor, elementShape); + public Constant val(double[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a Tensor by indexing into the TensorList. - *

          - * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see `tf.gather`). - *

          - * input_handle: The input tensor list. - * indices: The indices used to index into the list. - * values: The tensor. + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. * - * @param data type for {@code values()} output - * @param inputHandle - * @param indices - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListGather + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant */ - public TensorListGather tensorListGather(Operand inputHandle, - Operand indices, Operand elementShape, DataType elementDtype) { - return TensorListGather.create(scope, inputHandle, indices, elementShape, elementDtype); + public Constant val(ByteNdArray data) { + return Constant.tensorOf(scope, data); } /** + * Creates a rank-4 constant of {@code boolean} elements. * - * @param data type for {@code item()} output - * @param inputHandle - * @param index - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListGetItem + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListGetItem tensorListGetItem(Operand inputHandle, - Operand index, Operand elementShape, DataType elementDtype) { - return TensorListGetItem.create(scope, inputHandle, index, elementShape, elementDtype); + public Constant val(boolean[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Returns the number of tensors in the input tensor list. - *

          - * input_handle: the input list - * length: the number of tensors in the list + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. * - * @param inputHandle - * @return a new instance of TensorListLength + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant */ - public TensorListLength tensorListLength(Operand inputHandle) { - return TensorListLength.create(scope, inputHandle); + public Constant val(FloatNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Returns the last element of the input list as well as a list with all but that element. - *

          - * Fails if the list is empty. - *

          - * input_handle: the input list - * tensor: the withdrawn last element of the list - * element_dtype: the type of elements in the list - * element_shape: the shape of the output tensor + * Creates a rank-6 constant of {@code boolean} elements. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListPopBack + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListPopBack tensorListPopBack(Operand inputHandle, - Operand elementShape, DataType elementDtype) { - return TensorListPopBack.create(scope, inputHandle, elementShape, elementDtype); + public Constant val(boolean[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. - *

          - * tensor: The tensor to put on the list. - * input_handle: The old list. - * output_handle: A list with the elements of the old list followed by tensor. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * Creates a rank-2 constant of {@code long} elements. * - * @param inputHandle - * @param tensor - * @return a new instance of TensorListPushBack + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorListPushBack tensorListPushBack(Operand inputHandle, - Operand tensor) { - return TensorListPushBack.create(scope, inputHandle, tensor); + public Constant val(long[][] data) { + return Constant.tensorOf(scope, data); } /** + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. * - * @param inputHandles - * @param tensor - * @return a new instance of TensorListPushBackBatch + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code double} elements. + * @return a double constant */ - public TensorListPushBackBatch tensorListPushBackBatch(Operand inputHandles, - Operand tensor) { - return TensorListPushBackBatch.create(scope, inputHandles, tensor); + public Constant val(DoubleNdArray data) { + return Constant.tensorOf(scope, data); } /** - * List of the given size with empty elements. - *

          - * element_shape: the shape of the future elements of the list - * num_elements: the number of elements to reserve - * handle: the output list - * element_dtype: the desired type of elements in the list. + * Creates a rank-3 constant of {@code boolean} elements. * - * @param elementShape - * @param numElements - * @param elementDtype - * @return a new instance of TensorListReserve + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListReserve tensorListReserve( - Operand elementShape, Operand numElements, DataType elementDtype) { - return TensorListReserve.create(scope, elementShape, numElements, elementDtype); + public Constant val(boolean[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Resizes the list. - *

          - * - * input_handle: the input list - * size: size of the output list + * Creates a constant containing a single {@code float} element. * - * @param inputHandle - * @param size - * @return a new instance of TensorListResize + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a float constant */ - public TensorListResize tensorListResize(Operand inputHandle, Operand size) { - return TensorListResize.create(scope, inputHandle, size); + public Constant val(float data) { + return Constant.scalarOf(scope, data); } /** - * Creates a TensorList by indexing into a Tensor. - *

          - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - *

          - * tensor: The input tensor. - * indices: The indices used to index into the list. - * element_shape: The shape of the elements in the list (can be less specified than - * the shape of the tensor). - * num_elements: The size of the output list. Must be large enough to accommodate - * the largest index in indices. If -1, the list is just large enough to include - * the largest index in indices. - * output_handle: The TensorList. + * Creates a rank-6 constant of {@code float} elements. * - * @param tensor - * @param indices - * @param elementShape - * @param numElements - * @return a new instance of TensorListScatter + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorListScatter tensorListScatter(Operand tensor, - Operand indices, Operand elementShape, Operand numElements) { - return TensorListScatter.create(scope, tensor, indices, elementShape, numElements); + public Constant val(float[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Scatters tensor at indices in an input list. - *

          - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - *

          - * input_handle: The list to scatter into. - * tensor: The input tensor. - * indices: The indices used to index into the list. - * output_handle: The TensorList. + * Creates a rank-5 constant of {@code byte} elements. * - * @param inputHandle - * @param tensor - * @param indices - * @return a new instance of TensorListScatterIntoExistingList + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( - Operand inputHandle, Operand tensor, Operand indices) { - return TensorListScatterIntoExistingList.create(scope, inputHandle, tensor, indices); + public Constant val(byte[][][][][] data) { + return Constant.tensorOf(scope, data); } /** + * Creates a rank-4 constant of {@code float} elements. * - * @param inputHandle - * @param index - * @param item - * @return a new instance of TensorListSetItem - */ - public TensorListSetItem tensorListSetItem(Operand inputHandle, - Operand index, Operand item) { - return TensorListSetItem.create(scope, inputHandle, index, item); + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant + */ + public Constant val(float[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Splits a tensor into a list. - *

          - * list[i] corresponds to lengths[i] tensors from the input tensor. - * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - *

          - * tensor: The input tensor. - * element_shape: A shape compatible with that of elements in the tensor. - * lengths: Vector of sizes of the 0th dimension of tensors in the list. - * output_handle: The list. + * Creates a rank-5 constant of {@code double} elements. * - * @param tensor - * @param elementShape - * @param lengths - * @return a new instance of TensorListSplit + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public TensorListSplit tensorListSplit(Operand tensor, - Operand elementShape, Operand lengths) { - return TensorListSplit.create(scope, tensor, elementShape, lengths); + public Constant val(double[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Stacks all tensors in the list. - *

          - * Requires that all tensors have the same shape. - *

          - * input_handle: the input list - * tensor: the gathered result - * num_elements: optional. If not -1, the number of elements in the list. + * Creates a constant containing a single {@code int} element. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @param options carries optional attributes values - * @return a new instance of TensorListStack + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return an integer constant */ - public TensorListStack tensorListStack(Operand inputHandle, - Operand elementShape, DataType elementDtype, TensorListStack.Options... options) { - return TensorListStack.create(scope, inputHandle, elementShape, elementDtype, options); + public Constant val(int data) { + return Constant.scalarOf(scope, data); } /** - * Adds sparse `updates` to an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by adding sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd_add`, except that the updates - * are added onto an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of tensor_scatter_add is to add individual elements to a - * tensor by index. For example, say we want to add 4 elements in a rank-1 - * tensor with 8 elements. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, 12, 1, 11, 10, 1, 1, 13] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-3 constant of {@code float} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdAdd + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorScatterNdAdd tensorScatterNdAdd( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdAdd.create(scope, tensor, indices, updates); + public Constant val(float[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Subtracts sparse `updates` from an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by subtracting sparse `updates` from the - * passed in `tensor`. - * This operation is very similar to `tf.scatter_nd_sub`, except that the updates - * are subtracted from an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of tensor_scatter_sub is to subtract individual elements - * from a tensor by index. For example, say we want to insert 4 scattered elements - * in a rank-1 tensor with 8 elements. - *

          - * In Python, this scatter subtract operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, -10, 1, -9, -8, 1, 1, -11] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-1 constant of {@code byte} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdSub + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public TensorScatterNdSub tensorScatterNdSub( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdSub.create(scope, tensor, indices, updates); + public Constant val(byte[] data) { + return Constant.vectorOf(scope, data); } /** - * Scatter `updates` into an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by applying sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd`, except that the updates are - * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * If `indices` contains duplicates, then their updates are accumulated (summed). - *

          - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. - *

          - *

          - * - *
          - *

          - * In Python, this scatter operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, 11, 1, 10, 9, 1, 1, 12] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-4 constant of {@code double} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdUpdate + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant val(double[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code boolean} elements. + * @return a boolean constant + */ + public Constant val(BooleanNdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant + */ + public Constant val(boolean[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return an integer constant + */ + public Constant val(int[][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant + */ + public Constant val(String data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code double} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a double constant + */ + public Constant val(double data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant val(long[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a byte constant + */ + public Constant val(byte[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-6 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant + */ + public Constant val(LongNdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant containing a single {@code boolean} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant val(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a double constant + */ + public Constant val(double[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant + */ + public Constant val(float[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + public Constant val(byte[][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant val(long[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant val(double[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant containing a single {@code long} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a long constant + */ + public Constant val(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code byte} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a byte constant + */ + public Constant val(byte data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorScatterNdUpdate tensorScatterNdUpdate( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdUpdate.create(scope, tensor, indices, updates); + public Constant val(long[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Assign `value` to the sliced l-value reference of `input`. - *

          - * The values of `value` are assigned to the positions in the tensor `input` that - * are selected by the slice parameters. The slice parameters `begin` `end` - * `strides` etc. work exactly as in `StridedSlice`. - *

          - * NOTE this op currently does not support broadcasting and so `value`'s shape - * must be exactly the shape produced by the slice of `input`. + * Creates a rank-5 constant of {@code float} elements. * - * @param data type for {@code output()} output - * @param input - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of TensorStridedSliceUpdate + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorStridedSliceUpdate tensorStridedSliceUpdate( - Operand input, Operand begin, Operand end, Operand strides, Operand value, - TensorStridedSliceUpdate.Options... options) { - return TensorStridedSliceUpdate.create(scope, input, begin, end, strides, value, options); + public Constant val(float[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Constructs a tensor by tiling a given tensor. - *

          - * This operation creates a new tensor by replicating `input` `multiples` times. - * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, - * and the values of `input` are replicated `multiples[i]` times along the 'i'th - * dimension. For example, tiling `[a b c d]` by `[2]` produces - * `[a b c d a b c d]`. - *

          - * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - * >>> b = tf.constant([1,2], tf.int32) - * >>> tf.tile(a, b) - * - * >>> c = tf.constant([2,1], tf.int32) - * >>> tf.tile(a, c) - * - * >>> d = tf.constant([2,2], tf.int32) - * >>> tf.tile(a, d) - * + * Creates a rank-6 constant of {@code byte} elements. * - * @param data type for {@code output()} output - * @param input 1-D or higher. - * @param multiples 1-D. Length must be the same as the number of dimensions in `input` - * @return a new instance of Tile + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public Tile tile(Operand input, Operand multiples) { - return Tile.create(scope, input, multiples); + public Constant val(byte[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Provides the time since epoch in seconds. - *

          - * Returns the timestamp as a `float64` for seconds since the Unix epoch. - *

          - * Note: the timestamp is computed when the op is executed, not when it is added - * to the graph. + * Creates a rank-3 constant of {@code double} elements. * - * @return a new instance of Timestamp + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public Timestamp timestamp() { - return Timestamp.create(scope); + public Constant val(double[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Perform batches of RPC requests. - *

          - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *

          - * - `address` (the host+port or BNS address of the request) - * - `method` (the method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - *

          - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

          {@code
          -   *  service MyService {
          -   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
          -   *    }
          -   *  };
          -   *  }
          - * then call this op with arguments: - *
          {@code
          -   *  address = "localhost:2345"
          -   *  method = "MyService/MyMethod"
          -   *  }
          - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - *

          - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. - *

          - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - *

          - * NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

          - * Unlike the standard `Rpc` op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the `status_code` and `status_message` entry for the corresponding RPC - * call is set with the error returned from the RPC call. The `response` tensor - * will contain valid response values for those minibatch entries whose RPCs did - * not fail; the rest of the entries will have empty strings. + * Creates a rank-2 constant of {@code boolean} elements. * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attributes values - * @return a new instance of TryRpc + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TryRpc tryRpc(Operand address, Operand method, Operand request, - TryRpc.Options... options) { - return TryRpc.create(scope, address, method, request, options); + public Constant val(boolean[][] data) { + return Constant.tensorOf(scope, data); } /** - * Reverses the operation of Batch for a single output Tensor. - *

          - * An instance of Unbatch either receives an empty batched_tensor, in which case it - * asynchronously waits until the values become available from a concurrently - * running instance of Unbatch with the same container and shared_name, or receives - * a non-empty batched_tensor in which case it finalizes all other concurrently - * running instances and outputs its own element from the batch. - *

          - * batched_tensor: The possibly transformed output of Batch. The size of the first - * dimension should remain unchanged by the transformations for the operation to - * work. - * batch_index: The matching batch_index obtained from Batch. - * id: The id scalar emitted by Batch. - * unbatched_tensor: The Tensor corresponding to this execution. - * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the - * batched input tensor associated with a given invocation of the op. - * container: Container to control resource sharing. - * shared_name: Instances of Unbatch with the same container and shared_name are - * assumed to possibly belong to the same batch. If left empty, the op name will - * be used as the shared name. + * Creates a rank-2 constant of {@code float} elements. * - * @param data type for {@code unbatchedTensor()} output - * @param batchedTensor - * @param batchIndex - * @param id - * @param timeoutMicros - * @param options carries optional attributes values - * @return a new instance of Unbatch + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public Unbatch unbatch(Operand batchedTensor, Operand batchIndex, - Operand id, Long timeoutMicros, Unbatch.Options... options) { - return Unbatch.create(scope, batchedTensor, batchIndex, id, timeoutMicros, options); + public Constant val(float[][] data) { + return Constant.tensorOf(scope, data); } /** - * Gradient of Unbatch. - *

          - * Acts like Batch but using the given batch_index index of batching things as they - * become available. This ensures that the gradients are propagated back in the - * same session which did the forward pass. - *

          - * original_input: The input to the Unbatch operation this is the gradient of. - * batch_index: The batch_index given to the Unbatch operation this is the gradient - * of. - * grad: The downstream gradient. - * id: The id scalar emitted by Batch. - * batched_grad: The return value, either an empty tensor or the batched gradient. - * container: Container to control resource sharing. - * shared_name: Instances of UnbatchGrad with the same container and shared_name - * are assumed to possibly belong to the same batch. If left empty, the op name - * will be used as the shared name. + * Creates a rank-1 constant of {@code boolean} elements. * - * @param data type for {@code batchedGrad()} output - * @param originalInput - * @param batchIndex - * @param grad - * @param id - * @param options carries optional attributes values - * @return a new instance of UnbatchGrad + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public UnbatchGrad unbatchGrad(Operand originalInput, - Operand batchIndex, Operand grad, Operand id, - UnbatchGrad.Options... options) { - return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); + public Constant val(boolean[] data) { + return Constant.vectorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx = unique(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  }
          + * Creates a rank-4 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + public Constant val(byte[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @return a new instance of Unique + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant */ - public Unique unique(Operand x, - Operand axis) { - return Unique.create(scope, x, axis); + public Constant val(IntNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx = unique(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  }
           + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @param outIdx - * @return a new instance of Unique + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant */ - public Unique unique(Operand x, - Operand axis, DataType outIdx) { - return Unique.create(scope, x, axis, outIdx); + public Constant val(NdArray data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx, count = unique_with_counts(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  count ==> [2, 1, 3, 1, 2]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  count ==> [2, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  count ==> [1, 2]
          -   *  }
          + * Creates a rank-6 constant of {@code long} elements. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @return a new instance of UniqueWithCounts + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public UniqueWithCounts uniqueWithCounts( - Operand x, Operand axis) { - return UniqueWithCounts.create(scope, x, axis); + public Constant val(long[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx, count = unique_with_counts(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  count ==> [2, 1, 3, 1, 2]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  count ==> [2, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  count ==> [1, 2]
          -   *  }
          + * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of + * the given shape. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @param outIdx - * @return a new instance of UniqueWithCounts + * @param scope is a scope used to add the underlying operation. + * @param shape a shape + * @return a long constant */ - public UniqueWithCounts uniqueWithCounts( - Operand x, Operand axis, DataType outIdx) { - return UniqueWithCounts.create(scope, x, axis, outIdx); + public Constant val(Shape shape) { + return Constant.create(scope, shape); } /** - * Converts an array of flat indices into a tuple of coordinate arrays. - *

          + * Create a constant from a Tensor. * - * Example: - *

          {@code
          -   *  y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
          -   *  # 'dims' represent a hypothetical (3, 3) tensor of indices:
          -   *  # [[0, 1, *2*],
          -   *  #  [3, 4, *5*],
          -   *  #  [6, *7*, 8]]
          -   *  # For each entry from 'indices', this operation returns
          -   *  # its coordinates (marked with '*'), such as
          -   *  # 2 ==> (0, 2)
          -   *  # 5 ==> (1, 2)
          -   *  # 7 ==> (2, 1)
          -   *  y ==> [[0, 1, 2], [2, 2, 1]]
          -   *  }
          + * @param scope is a scope used to add the underlying operation. + * @param tensor a Tensor holding the constant value + * @return a constant of the same data type as `tensor` + */ + public Constant val(Tensor tensor) { + return Constant.create(scope, tensor); + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. * - * @compatibility(numpy) Equivalent to np.unravel_index - * @end_compatibility - * @param data type for {@code output()} output - * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the - * flattened version of an array of dimensions dims. - * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling - * indices. - * @return a new instance of UnravelIndex + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a byte constant */ - public UnravelIndex unravelIndex(Operand indices, Operand dims) { - return UnravelIndex.create(scope, indices, dims); + public Constant val(Charset charset, NdArray data) { + return Constant.tensorOf(scope, charset, data); } /** - * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - *

          - * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. - * For example, given a tensor of shape `(A, B, C, D)`; - *

          - * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` - * and each tensor in `output` will have shape `(B, C, D)`. (Note that the - * dimension unpacked along is gone, unlike `split`). - *

          - * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` - * and each tensor in `output` will have shape `(A, C, D)`. - * Etc. - *

          - * This is the opposite of `pack`. + * Creates a constant of {@code String} elements, using the given charset. * - * @param data type for {@code output()} output - * @param value 1-D or higher, with `axis` dimension size equal to `num`. - * @param num - * @param options carries optional attributes values - * @return a new instance of Unstack + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant */ - public Unstack unstack(Operand value, Long num, - Unstack.Options... options) { - return Unstack.create(scope, value, num, options); + public Constant val(Charset charset, String[] data) { + return Constant.vectorOf(scope, charset, data); } /** - * Op is similar to a lightweight Dequeue. - *

          - * The basic functionality is similar to dequeue with many fewer - * capabilities and options. This Op is optimized for performance. + * Creates a {@code String} constant using a specified encoding. * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of Unstage + * @param scope is a scope used to add the underlying operation. + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant */ - public Unstage unstage(List> dtypes, Unstage.Options... options) { - return Unstage.create(scope, dtypes, options); + public Constant val(Charset charset, String data) { + return Constant.scalarOf(scope, charset, data); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java index d2079860edc..8b75cf57a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java @@ -23,17 +23,19 @@ *

          Example usage: * *

          {@code
          + * Ops tf = Ops.create();
          + *
            * // The "decodeJpeg" operation can be used as an operand to the "cast" operation
          - * Operand decodeJpeg = ops.image.decodeJpeg(...);
          - * ops.dtypes.cast(decodeJpeg, TFloat32.DTYPE);
          + * Operand decodeJpeg = tf.image.decodeJpeg(...);
          + * tf.dtypes.cast(decodeJpeg, TFloat32.DTYPE);
            *
            * // The output "y" of the "unique" operation can be used as an operand to the "cast" operation
          - * Output y = ops.unique(...).y();
          - * ops.dtypes.cast(y, TFloat32.DTYPE);
          + * Output y = tf.unique(...).y();
          + * tf.dtypes.cast(y, TFloat32.DTYPE);
            *
            * // The "split" operation can be used as operand list to the "concat" operation
          - * Iterable> split = ops.split(...);
          - * ops.concat(split, ops.scalar(0));
          + * Iterable> split = tf.split(...);
          + * tf.concat(split, tf.val(0));
            * }
          */ public interface Operand { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java index b79fdc4de8d..45f641dc3ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java @@ -15,8 +15,6 @@ package org.tensorflow.op.core; -import static java.nio.charset.StandardCharsets.UTF_8; - import java.nio.ByteBuffer; import java.nio.DoubleBuffer; import java.nio.FloatBuffer; @@ -33,19 +31,41 @@ import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.Operator; +import org.tensorflow.tools.Shape; +import org.tensorflow.tools.ndarray.BooleanNdArray; +import org.tensorflow.tools.ndarray.ByteNdArray; +import org.tensorflow.tools.ndarray.DoubleNdArray; +import org.tensorflow.tools.ndarray.FloatNdArray; +import org.tensorflow.tools.ndarray.IntNdArray; +import org.tensorflow.tools.ndarray.LongNdArray; +import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.NdArrays; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; import org.tensorflow.types.family.TType; /** * An operator producing a constant value. + * + *

          All endpoints of this operator are named `val`, except those accepting vararg + * elements in parameter, which are named `array`. For example: + * + *

          {@code
          + * Ops tf = Ops.create();
          + * tf.val(1.0f);  // mapped to Constant.scalarOf(scope, float);
          + * tf.val(new float[] {1.0f, 2.0f});  // mapped to Constant.vectorOf(scope, float[])
          + * tf.val(new float[][] { {1.0f, 2.0f}, {3.0f, 4.0f} });  //mapped to Constant.tensorOf(scope, float[][])
          + * tf.array(1.0f, 2.0f, 3.0f);  // mapped to Constant.arrayOf(scope, float...)
          + * }
          */ @Operator -public final class Constant extends Const { +public final class Constant extends PrimitiveOp implements Operand { /** * Creates a constant containing a single {@code int} element. @@ -53,11 +73,9 @@ public final class Constant extends Const { * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return an integer constant - * @deprecated use {@link Ops#scalar(int)} instead */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int data) { + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, int data) { try (Tensor value = TInt32.scalarOf(data)) { return create(scope, value); } @@ -69,28 +87,43 @@ public static Constant create(Scope scope, int data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#vector(int...)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[] data) { + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, int[] data) { try (Tensor value = TInt32.vectorOf(data)) { return create(scope, value); } } + /** + * Creates a constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, int... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); + } + /** * Creates a rank-2 constant of {@code int} elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -99,12 +132,13 @@ public static Constant create(Scope scope, int[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -113,12 +147,13 @@ public static Constant create(Scope scope, int[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -127,12 +162,13 @@ public static Constant create(Scope scope, int[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[][][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -141,12 +177,27 @@ public static Constant create(Scope scope, int[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @return an integer constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, int[][][][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, IntNdArray data) { + try (Tensor value = TInt32.tensorOf(data)) { + return create(scope, value); + } } /** @@ -162,7 +213,7 @@ public static Constant create(Scope scope, int[][][][][][] data) { * @param data a buffer containing the tensor data. * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt32>)} instead + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt32>)} instead */ @Endpoint @Deprecated @@ -178,12 +229,12 @@ public static Constant create(Scope scope, long[] shape, IntBuffer data) * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a float constant - * @deprecated use {@link Ops#scalar(float)} instead */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, float data) { + try (Tensor value = TFloat32.scalarOf(data)) { + return create(scope, value); + } } /** @@ -192,12 +243,28 @@ public static Constant create(Scope scope, float data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#vector(float...)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, float[] data) { + try (Tensor value = TFloat32.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, float... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -206,12 +273,13 @@ public static Constant create(Scope scope, float[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -220,12 +288,13 @@ public static Constant create(Scope scope, float[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -234,12 +303,13 @@ public static Constant create(Scope scope, float[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -248,12 +318,13 @@ public static Constant create(Scope scope, float[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[][][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -262,12 +333,27 @@ public static Constant create(Scope scope, float[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @return a float constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, float[][][][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, FloatNdArray data) { + try (Tensor value = TFloat32.tensorOf(data)) { + return create(scope, value); + } } /** @@ -283,7 +369,7 @@ public static Constant create(Scope scope, float[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat32>)} instead + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat32>)} instead */ @Endpoint @Deprecated @@ -299,12 +385,12 @@ public static Constant create(Scope scope, long[] shape, FloatBuffer d * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a double constant - * @deprecated use {@link Ops#scalar(double)} instead */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, double data) { + try (Tensor value = TFloat64.scalarOf(data)) { + return create(scope, value); + } } /** @@ -313,12 +399,28 @@ public static Constant create(Scope scope, double data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#vector(double...)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, double[] data) { + try (Tensor value = TFloat64.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a double constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, double... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -327,12 +429,13 @@ public static Constant create(Scope scope, double[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -341,12 +444,13 @@ public static Constant create(Scope scope, double[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -355,12 +459,13 @@ public static Constant create(Scope scope, double[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -369,12 +474,13 @@ public static Constant create(Scope scope, double[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[][][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -383,12 +489,27 @@ public static Constant create(Scope scope, double[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @return a double constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, double[][][][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code double} elements. + * @return a double constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, DoubleNdArray data) { + try (Tensor value = TFloat64.tensorOf(data)) { + return create(scope, value); + } } /** @@ -404,7 +525,7 @@ public static Constant create(Scope scope, double[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a double constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TFloat64>)} instead + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat64>)} instead */ @Endpoint @Deprecated @@ -420,12 +541,12 @@ public static Constant create(Scope scope, long[] shape, DoubleBuffer * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a long constant - * @deprecated use {@link Ops#scalar(long)} instead */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, long data) { + try (Tensor value = TInt64.scalarOf(data)) { + return create(scope, value); + } } /** @@ -434,12 +555,13 @@ public static Constant create(Scope scope, long data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#vector(long...)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, long[] data) { + try (Tensor value = TInt64.vectorOf(data)) { + return create(scope, value); + } } /** @@ -448,12 +570,28 @@ public static Constant create(Scope scope, long[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data An array containing the values to put into the new constant. + * @return a long constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, long... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -462,12 +600,13 @@ public static Constant create(Scope scope, long[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -476,12 +615,13 @@ public static Constant create(Scope scope, long[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -490,12 +630,13 @@ public static Constant create(Scope scope, long[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[][][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -504,12 +645,27 @@ public static Constant create(Scope scope, long[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @return a long constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, long[][][][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, LongNdArray data) { + try (Tensor value = TInt64.tensorOf(data)) { + return create(scope, value); + } } /** @@ -525,7 +681,7 @@ public static Constant create(Scope scope, long[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TInt64>)} instead + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt64>)} instead */ @Endpoint @Deprecated @@ -541,12 +697,12 @@ public static Constant create(Scope scope, long[] shape, LongBuffer data * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a boolean constant - * @deprecated use {@link Ops#scalar(boolean)} instead */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, boolean data) { + try (Tensor value = TBool.scalarOf(data)) { + return create(scope, value); + } } /** @@ -555,12 +711,28 @@ public static Constant create(Scope scope, boolean data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#vector(boolean...)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, boolean[] data) { + try (Tensor value = TBool.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, boolean... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -569,12 +741,13 @@ public static Constant create(Scope scope, boolean[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TBool>)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -583,12 +756,13 @@ public static Constant create(Scope scope, boolean[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TBool>)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -597,12 +771,13 @@ public static Constant create(Scope scope, boolean[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TBool>)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -611,12 +786,13 @@ public static Constant create(Scope scope, boolean[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TBool>)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[][][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -625,127 +801,160 @@ public static Constant create(Scope scope, boolean[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TBool>)} instead + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, boolean[][][][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant - * @deprecated use {@link Ops#scalar(String)} instead + * @param data an n-dimensional array of {@code boolean} elements. + * @return a boolean constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, String data) { - return create(scope, data, UTF_8); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, BooleanNdArray data) { + try (Tensor value = TBool.tensorOf(data)) { + return create(scope, value); + } } /** - * Creates a {@code String} constant using a specified encoding. + * Creates a constant containing a single {@code byte} element. * * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant - * @deprecated use {@link Ops#scalar(Charset, String)} instead + * @param data The value to put into the new constant. 
+ * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, String data, Charset charset) { - try (Tensor value = Tensor.create(data.getBytes(charset), TString.DTYPE)) { + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, byte data) { + try (Tensor value = TUint8.scalarOf(data)) { return create(scope, value); } } /** - * Creates a constant containing a single {@code String} element, represented as an array of {@code byte}s. + * Creates a rank-1 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, byte[] data) { + try (Tensor value = TUint8.vectorOf(data)) { + return create(scope, value); + } } /** - * Creates a rank-1 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. 
+ * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, byte... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** - * Creates a rank-2 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-2 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-3 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-3 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-4 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-4 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[][][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-5 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-5 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @deprecated use {@link Ops#constant(Tensor) Ops.constant(Tensor<TString>)} instead + * @param data An array containing the values to put into the new constant. 
The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - @Deprecated - public static Constant create(Scope scope, byte[][][][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } + } + + /** + * Creates a rank-6 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, ByteNdArray data) { + try (Tensor value = TUint8.tensorOf(data)) { + return create(scope, value); + } } /** @@ -763,7 +972,7 @@ public static Constant create(Scope scope, byte[][][][][][] data) { * @return a constant of type `type` * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the * buffer - * @deprecated use {@link Ops#constant(Tensor)} instead + * @deprecated use {@link Ops#val(Tensor)} instead */ @Endpoint @Deprecated @@ -773,6 +982,203 @@ public static Constant create(Scope scope, DataType type } } + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. 
 + * + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, String data) { + try (Tensor value = TString.scalarOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a {@code String} constant using a specified encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, Charset charset, String data) { + try (Tensor value = TString.tensorOf(charset, NdArrays.scalarOfObject(data))) { + return create(scope, value); + } + } + + /** + * Creates a rank-1 constant of {@code String} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant vector + */ + public static Constant vectorOf(Scope scope, String[] data) { + NdArray src = NdArrays.vectorOfObjects(data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. 
+ * @return the {@code String} constant + */ + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, Charset charset, String[] data) { + try (Tensor value = TString.tensorOf(charset, NdArrays.vectorOfObjects(data))) { + return Constant.create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return the {@code String} constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, String... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, Charset charset, String... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, charset, data); + } + + /** + * Creates a rank-2 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
 + * @return a {@link TString} constant matrix + */ + public static Constant tensorOf(Scope scope, String[][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-3 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant tensor + */ + public static Constant tensorOf(Scope scope, String[][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-4 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant tensor + */ + public static Constant tensorOf(Scope scope, String[][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-5 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
 + * @return a {@link TString} constant tensor + */ + public static Constant tensorOf(Scope scope, String[][][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-6 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant tensor + */ + public static Constant tensorOf(Scope scope, String[][][][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, NdArray data) { + try (Tensor value = TString.tensorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, Charset charset, NdArray data) { + try (Tensor value = TString.tensorOf(charset, data)) { + return create(scope, value); + } + } + /** * Create a constant from a Java object. 
* @@ -788,7 +1194,7 @@ public static Constant create(Scope scope, DataType type * @param object a Java object representing the constant. * @return a constant of type `type` * @see org.tensorflow.Tensor#create(Object) Tensor.create - * @deprecated use {@link Ops#constant(Tensor)} instead + * @deprecated use {@link Ops#val(Tensor)} instead */ @Endpoint @Deprecated @@ -798,6 +1204,19 @@ public static Constant create(Scope scope, Object object, D } } + /** + * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of + * the given shape. + * + * @param scope is a scope used to add the underlying operation. + * @param shape a shape + * @return a long constant + */ + @Endpoint(name = "val") + public static Constant create(Scope scope, Shape shape) { + return vectorOf(scope, shape.asArray()); + } + /** * Create a constant from a Tensor. * @@ -805,12 +1224,26 @@ public static Constant create(Scope scope, Object object, D * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` */ - @Endpoint + @Endpoint(name = "val") public static Constant create(Scope scope, Tensor tensor) { - return new Constant<>(buildConstOp(scope, tensor)); + return new Constant<>( + scope + .env() + .opBuilder("Const", scope.makeOpName("Const")) + .setAttr("value", tensor) + .setAttr("dtype", tensor.dataType()) + .build()); + } + + @Override + public Output asOutput() { + return output; } private Constant(Operation operation) { super(operation); + output = operation.output(0); } + + private final Output output; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java index 2132ef22af2..bcc2032da64 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java +++ 
b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java @@ -45,7 +45,7 @@ * Example of usage: *
          {@code
            * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
          - * Scalar alpha = ops.scalar(1.0f);
          + * Constant alpha = tf.val(1.0f);
            * tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
            * tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
            * }
          diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java index ac823d26ab4..612af709e4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java @@ -30,9 +30,9 @@ * An operator creating a constant initialized with zeros of the shape given by `dims`. * *

          For example, the following expression - *

          {@code ops.zeros(ops.vector(shape), TFloat32.DTYPE)
          + *
          {@code tf.zeros(tf.val(shape), TFloat32.DTYPE)
          * is the equivalent of - *
          {@code ops.fill(ops.vector(shape), ops.scalar(0.0f))
          + *
          {@code tf.fill(tf.val(shape), tf.val(0.0f))
          * * @param constant type */ diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java index 99338967b70..08004ad5fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java @@ -37,7 +37,7 @@ public void tensorInputTensorOutput() { try (Graph g = new Graph(); Session sess = new Session(g)) { Ops ops = Ops.create(g); - Operand x = ops.math.add(ops.scalar(1), ops.scalar(2)); + Operand x = ops.math.add(ops.val(1), ops.val(2)); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(3, result.intValue()); } @@ -50,9 +50,9 @@ public void testListInputTensorOutput() { Session sess = new Session(g)) { Ops ops = Ops.create(g); ArrayList> inputs = new ArrayList<>(); - inputs.add(ops.scalar(1)); - inputs.add(ops.scalar(2)); - inputs.add(ops.scalar(3)); + inputs.add(ops.val(1)); + inputs.add(ops.val(2)); + inputs.add(ops.val(3)); Operand x = ops.math.addN(inputs); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(6, result.intValue()); @@ -73,11 +73,11 @@ public void testControlDependencies() { Session sess = new Session(g)) { Ops ops = Ops.create(g); Operand variable = ops.variable(Shape.scalar(), TInt32.DTYPE); - Operand initVariable = ops.assign(variable, ops.scalar(0)); + Operand initVariable = ops.assign(variable, ops.val(0)); ArrayList> controls = new ArrayList<>(); - controls.add(ops.assign(variable, ops.scalar(3))); + controls.add(ops.assign(variable, ops.val(3))); Operand x = - ops.withControlDependencies(controls).math.add(variable, ops.scalar(0)); + ops.withControlDependencies(controls).math.add(variable, ops.val(0)); 
sess.runner().addTarget(initVariable).run(); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(3, result.intValue()); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java index 6ae2b1bcdd0..e0f06415dde 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java @@ -44,7 +44,7 @@ public void createIntZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TInt32.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt32.DTYPE); try (Tensor result = sess.runner().fetch(op).run().get(0)) { int[][] actual = result.expect(TInt32.DTYPE).copyTo(new int[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -62,7 +62,7 @@ public void createFloatZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TFloat32.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat32.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { float[][] actual = result.expect(TFloat32.DTYPE).copyTo(new float[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -80,7 +80,7 @@ public void createDoubleZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TFloat64.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat64.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { double[][] actual = 
result.expect(TFloat64.DTYPE).copyTo(new double[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -98,7 +98,7 @@ public void createLongZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TInt64.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt64.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { long[][] actual = result.expect(TInt64.DTYPE).copyTo(new long[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -116,7 +116,7 @@ public void createBooleanZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TBool.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TBool.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { boolean[][] actual = result.expect(TBool.DTYPE).copyTo(new boolean[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -134,7 +134,7 @@ public void createUint8Zeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Vector.create(scope, shape), TUint8.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TUint8.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { byte[][] actual = result.expect(TUint8.DTYPE).copyTo(new byte[(int)shape[0]][(int)shape[1]]); result.copyTo(actual); @@ -153,7 +153,7 @@ public void cannotCreateStringZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros.create(scope, Vector.create(scope, shape), TString.DTYPE); + Zeros.create(scope, Constant.vectorOf(scope, shape), TString.DTYPE); } } @@ -163,7 +163,7 @@ public void operationsComposingZerosAreCorrectlyNamed() { Session sess = new 
Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros zeros = Zeros.create(scope.withSubScope("test"), Vector.create(scope, shape), TFloat32.DTYPE); + Zeros zeros = Zeros.create(scope.withSubScope("test"), Constant.vectorOf(scope, shape), TFloat32.DTYPE); List> results = sess.runner().addTarget("test/Zeros/Zero").addTarget("test/Zeros/Fill").run(); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java index 0448d367a3c..55c2ee3b37e 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java @@ -48,11 +48,11 @@ public void initializeTensorsWithZeros() { // Initialize tensor memory with zeros and take a snapshot tensorData.scalars().forEach(scalar -> scalar.setObject(valueOf(0))); - Constant x = tf.constant(tensor); + Constant x = tf.val(tensor); // Initialize the same tensor memory with ones and take a snapshot tensorData.scalars().forEach(scalar -> scalar.setObject(valueOf(1))); - Constant y = tf.constant(tensor); + Constant y = tf.val(tensor); // Subtract y from x and validate the result Sub sub = tf.math.sub(x, y); @@ -93,7 +93,7 @@ public void genericTest() { Ops tf = Ops.create(session); // Compute the power of the tensor by itself - Constant x = tf.constant(tensor); + Constant x = tf.val(tensor); IntNdArray result = tf.math.pow(x, x).data(); // Validate result by computing the same operation in Java diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 2e096152d2b..4d71b7ed978 100644 --- 
a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -471,27 +471,27 @@ private static TypeSpec buildTopClass(OpsSpec spec) { + "{@link $T @Operator} is exposed\n" + "by this API or one of its subgroup.\n

          Example usage:\n

          {@code\n"
                               + "try (Graph g = new Graph()) {\n"
          -                    + "  Ops ops = Ops.create(g);\n"
          +                    + "  Ops tf = Ops.create(g);\n"
                               + "  // Operations are typed classes with convenience\n"
                               + "  // builders in Ops.\n"
          -                    + "  Constant three = ops.scalar(3);\n"
          +                    + "  Constant three = tf.val(3);\n"
                               + "  // Single-result operations implement the Operand\n"
                               + "  // interface, so this works too.\n"
          -                    + "  Operand four = ops.scalar(4);\n"
          +                    + "  Operand four = tf.val(4);\n"
                               + "  // Most builders are found within a group, and accept\n"
                               + "  // Operand types as operands\n"
          -                    + "  Operand nine = ops.math.add(four, ops.scalar(5));\n"
          +                    + "  Operand nine = tf.math.add(four, tf.val(5));\n"
                               + "  // Multi-result operations however offer methods to\n"
                               + "  // select a particular result for use.\n"
          -                    + "  Operand result = \n"
          -                    + "      ops.math.add(ops.unique(s, a).y(), b);\n"
          +                    + "  Operand result = \n"
          +                    + "      tf.math.add(tf.unique(s, a).y(), b);\n"
                               + "  // Optional attributes\n"
          -                    + "  ops.linalg.matMul(a, b, MatMul.transposeA(true));\n"
          +                    + "  tf.linalg.matMul(a, b, MatMul.transposeA(true));\n"
                               + "  // Naming operators\n"
          -                    + "  ops.withName(\"foo\").scalar(5); // name \"foo\"\n"
          +                    + "  tf.withName(\"foo\").val(5); // name \"foo\"\n"
                               + "  // Names can exist in a hierarchy\n"
          -                    + "  Ops sub = ops.withSubScope(\"sub\");\n"
          -                    + "  sub.withName(\"bar\").scalar(4); // \"sub/bar\"\n"
          +                    + "  Ops sub = tf.withSubScope(\"sub\");\n"
          +                    + "  sub.withName(\"bar\").val(4); // \"sub/bar\"\n"
                               + "}\n"
                               + "}
          \n", T_OP,