Skip to content

Commit

Permalink
Fix CS1570 (XML comment has badly formed XML)
Browse files Browse the repository at this point in the history
  • Loading branch information
sharwell authored and Oceania2018 committed Jul 6, 2020
1 parent cef6ec0 commit 7884b24
Show file tree
Hide file tree
Showing 12 changed files with 44 additions and 51 deletions.
7 changes: 0 additions & 7 deletions Directory.Build.props
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,6 @@
-->
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<NoWarn>$(NoWarn),1573,1591,1712</NoWarn>

<!--
Suppress warnings for currently-invalid documentation comments.
CS1570: XML comment has badly formed XML
-->
<NoWarn>$(NoWarn),1570</NoWarn>
</PropertyGroup>

</Project>
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/APIs/tf.array.cs
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, str
/// <typeparam name="T1"></typeparam>
/// <typeparam name="T2"></typeparam>
/// <param name="tensor">N-D tensor.</param>
/// <param name="mask">K-D boolean tensor, K <= N and K must be known statically.</param>
/// <param name="mask">K-D boolean tensor, K &lt;= N and K must be known statically.</param>
/// <param name="name"></param>
/// <param name="axis">A 0-D int Tensor representing the axis in tensor to mask from. </param>
/// <returns>(N-K+1)-dimensional tensor populated by entries in tensor corresponding to True values in mask.</returns>
Expand Down
6 changes: 3 additions & 3 deletions src/TensorFlowNET.Core/APIs/tf.math.cs
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ public Tensor greater_equal<Tx, Ty>(Tx x, Ty y, string name = null)
=> gen_math_ops.greater_equal(x, y, name);

/// <summary>
/// Returns the truth value of (x < y) element-wise.
/// Returns the truth value of (x &lt; y) element-wise.
/// </summary>
/// <typeparam name="Tx"></typeparam>
/// <typeparam name="Ty"></typeparam>
Expand All @@ -191,7 +191,7 @@ public Tensor lgamma(Tensor x, string name = null)
=> gen_math_ops.lgamma(x, name: name);

/// <summary>
/// Returns the truth value of (x <= y) element-wise.
/// Returns the truth value of (x &lt;= y) element-wise.
/// </summary>
/// <typeparam name="Tx"></typeparam>
/// <typeparam name="Ty"></typeparam>
Expand Down Expand Up @@ -344,7 +344,7 @@ public Tensor maximum<T1, T2>(T1 x, T2 y, string name = null)
=> gen_math_ops.maximum(x, y, name: name);

/// <summary>
/// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
/// Returns the min of x and y (i.e. x &lt; y ? x : y) element-wise.
/// </summary>
/// <typeparam name="T1"></typeparam>
/// <typeparam name="T2"></typeparam>
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Eager/c_api.eager.cs
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,7 @@ public delegate void delete_backward_function_callback(string op_name,
/// <summary>
///
/// </summary>
/// <param name="t">const tensorflow::Tensor&</param>
/// <param name="t">const tensorflow::Tensor&amp;</param>
/// <returns>TFE_TensorHandle*</returns>
[DllImport(TensorFlowLibName)]
public static extern TFE_TensorHandle TFE_NewTensorHandle(IntPtr t, SafeStatusHandle status);
Expand Down
4 changes: 2 additions & 2 deletions src/TensorFlowNET.Core/Gradients/math_grad.cs
Original file line number Diff line number Diff line change
Expand Up @@ -393,7 +393,7 @@ private static Tensor[] _MinOrMaxGrad(Operation op, Tensor[] grads)
}

/// <summary>
/// Returns grad*(x > y, x <= y) with type of grad.
/// Returns grad*(x > y, x &lt;= y) with type of grad.
/// </summary>
/// <param name="op"></param>
/// <param name="grads"></param>
Expand All @@ -405,7 +405,7 @@ public static Tensor[] _MaximumGrad(Operation op, Tensor[] grads)
}

/// <summary>
/// Returns grad*(x < y, x >= y) with type of grad.
/// Returns grad*(x &lt; y, x >= y) with type of grad.
/// </summary>
/// <param name="op"></param>
/// <param name="grads"></param>
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ public static Tensor histogram_summary(string tag, Tensor values, string name =
/// Tags for the summary.
/// </param>
/// <param name="values">
/// Same shape as <c>tags. Values for the summary.
/// Same shape as <c>tags</c>. Values for the summary.
/// </param>
/// <param name="name">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScalarSummary'.
Expand Down
60 changes: 30 additions & 30 deletions src/TensorFlowNET.Core/Operations/gen_ops.cs
Original file line number Diff line number Diff line change
Expand Up @@ -6154,11 +6154,11 @@ public static Tensor crop_and_resize (Tensor image, Tensor boxes, Tensor box_ind
/// in normalized coordinates <c>[y1, x1, y2, x2]</c>. A normalized coordinate value of
/// <c>y</c> is mapped to the image coordinate at <c>y * (image_height - 1)</c>, so as the
/// <c>[0, 1]</c> interval of normalized image height is mapped to
/// <c>[0, image_height - 1] in image height coordinates. We do allow y1 &amp;gt; y2, in
/// <c>[0, image_height - 1]</c> in image height coordinates. We do allow y1 &gt; y2, in
/// which case the sampled crop is an up-down flipped version of the original
/// image. The width dimension is treated similarly. Normalized coordinates
/// outside the </c>[0, 1]<c> range are allowed, in which case we use
/// </c>extrapolation_value<c> to extrapolate the input image values.
/// outside the <c>[0, 1]</c> range are allowed, in which case we use
/// <c>extrapolation_value</c> to extrapolate the input image values.
/// </param>
/// <param name="box_ind">
/// A 1-D tensor of shape <c>[num_boxes]</c> with int32 values in <c>[0, batch)</c>.
Expand Down Expand Up @@ -6200,11 +6200,11 @@ public static Tensor crop_and_resize_grad_boxes (Tensor grads, Tensor image, Ten
/// in normalized coordinates <c>[y1, x1, y2, x2]</c>. A normalized coordinate value of
/// <c>y</c> is mapped to the image coordinate at <c>y * (image_height - 1)</c>, so as the
/// <c>[0, 1]</c> interval of normalized image height is mapped to
/// <c>[0, image_height - 1] in image height coordinates. We do allow y1 &amp;gt; y2, in
/// <c>[0, image_height - 1]</c> in image height coordinates. We do allow y1 &gt; y2, in
/// which case the sampled crop is an up-down flipped version of the original
/// image. The width dimension is treated similarly. Normalized coordinates
/// outside the </c>[0, 1]<c> range are allowed, in which case we use
/// </c>extrapolation_value<c> to extrapolate the input image values.
/// outside the <c>[0, 1]</c> range are allowed, in which case we use
/// <c>extrapolation_value</c> to extrapolate the input image values.
/// </param>
/// <param name="box_ind">
/// A 1-D tensor of shape <c>[num_boxes]</c> with int32 values in <c>[0, batch)</c>.
Expand Down Expand Up @@ -15982,9 +15982,9 @@ public static Tensor matrix_determinant (Tensor input, string name = "MatrixDete
/// everything else padded with zeros. The diagonal is computed as follows:
///
/// Assume <c>diagonal</c> has <c>k</c> dimensions <c>[I, J, K, ..., N]</c>, then the output is a
/// tensor of rank <c>k+1</c> with dimensions [I, J, K, ..., N, N]<c> where:
/// tensor of rank <c>k+1</c> with dimensions <c>[I, J, K, ..., N, N]</c> where:
///
/// </c>output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]<c>.
/// <c>output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]</c>.
///
/// For example:
///
Expand Down Expand Up @@ -18540,7 +18540,8 @@ public static Tensor nth_element (Tensor input, Tensor n, bool? reverse = null,
/// ][
/// [0.0, 1.0, 0.0] // one_hot(1)
/// [0.0, 0.0, 0.0] // one_hot(-1)
/// ]<c></c><c>
/// ]
/// </code>
/// </remarks>
public static Tensor one_hot (Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int? axis = null, string name = "OneHot")
{
Expand Down Expand Up @@ -21850,7 +21851,6 @@ public static (Tensor activations, Tensor min_activations, Tensor max_activation
/// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property.
/// </returns>
/// <remarks>
/// <code>
/// </remarks>
public static (Tensor output, Tensor output_min, Tensor output_max) quantized_reshape (Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string name = "QuantizedReshape")
{
Expand Down Expand Up @@ -26970,10 +26970,10 @@ public static Operation resource_sparse_apply_r_m_s_prop (Tensor var, Tensor ms,
/// <remarks>
/// The values of <c>value</c> are assigned to the positions in the variable
/// <c>ref</c> that are selected by the slice parameters. The slice parameters
/// <c>begin, </c>end<c>, </c>strides<c>, etc. work exactly as in </c>StridedSlice<c>.
/// <c>begin</c>, <c>end</c>, <c>strides</c>, etc. work exactly as in <c>StridedSlice</c>.
///
/// NOTE this op currently does not support broadcasting and so </c>value<c>'s
/// shape must be exactly the shape produced by the slice of </c>ref<c>.
/// NOTE this op currently does not support broadcasting and so <c>value</c>'s
/// shape must be exactly the shape produced by the slice of <c>ref</c>.
/// </remarks>
public static Operation resource_strided_slice_assign (Tensor referecne, Tensor begin, Tensor end, Tensor strides, Tensor value, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "ResourceStridedSliceAssign")
{
Expand Down Expand Up @@ -28068,7 +28068,7 @@ public static Operation save_v2 (Tensor prefix, Tensor tensor_names, Tensor shap
/// Tags for the summary.
/// </param>
/// <param name="values">
/// Same shape as <c>tags. Values for the summary.
/// Same shape as <c>tags</c>. Values for the summary.
/// </param>
/// <param name="name">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScalarSummary'.
Expand Down Expand Up @@ -34548,10 +34548,10 @@ public static Tensor strided_slice (Tensor input, Tensor begin, Tensor end, Tens
/// <remarks>
/// The values of <c>value</c> are assigned to the positions in the variable
/// <c>ref</c> that are selected by the slice parameters. The slice parameters
/// <c>begin, </c>end<c>, </c>strides<c>, etc. work exactly as in </c>StridedSlice<c>.
/// <c>begin</c>, <c>end</c>, <c>strides</c>, etc. work exactly as in <c>StridedSlice</c>.
///
/// NOTE this op currently does not support broadcasting and so </c>value<c>'s
/// shape must be exactly the shape produced by the slice of </c>ref<c>.
/// NOTE this op currently does not support broadcasting and so <c>value</c>'s
/// shape must be exactly the shape produced by the slice of <c>ref</c>.
/// </remarks>
public static Tensor strided_slice_assign (Tensor referecne, Tensor begin, Tensor end, Tensor strides, Tensor value, int? begin_mask = null, int? end_mask = null, int? ellipsis_mask = null, int? new_axis_mask = null, int? shrink_axis_mask = null, string name = "StridedSliceAssign")
{
Expand Down Expand Up @@ -36554,21 +36554,21 @@ public static Tensor tensor_array_split_v2 (Tensor handle, Tensor value, Tensor
/// and that <c>value</c> has shape
///
/// <code>
/// (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)<c></c><c>,
/// (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)</code>,
///
/// this splits values into a TensorArray with T tensors.
///
/// TensorArray index t will be the subtensor of values with starting position
///
/// </code>
/// <code>
/// (n0 + n1 + ... + n(t-1), 0, 0, ...)
/// <code>
/// </code>
///
/// and having size
///
/// </code>
/// <code>
/// nt x d0 x d1 x ...
/// <code>
/// </code>
/// </remarks>
public static Tensor tensor_array_split_v3 (Tensor handle, Tensor value, Tensor lengths, Tensor flow_in, string name = "TensorArraySplitV3")
{
Expand Down Expand Up @@ -38107,9 +38107,9 @@ public static (Tensor y, Tensor idx) unique (Tensor x, TF_DataType? out_idx = nu
/// This operation also returns a tensor <c>idx</c> that is the same size as
/// the number of the elements in <c>x</c> along the <c>axis</c> dimension. It
/// contains the index in the unique output <c>y</c>.
/// In other words, for an <c>1-D</c> tensor <c>x</c> with <c>axis = None:
/// In other words, for an <c>1-D</c> tensor <c>x</c> with <c>axis = None</c>:
///
/// </c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]<c>
/// <c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]</c>
///
/// For example:
///
Expand All @@ -38120,7 +38120,7 @@ public static (Tensor y, Tensor idx) unique (Tensor x, TF_DataType? out_idx = nu
/// idx ==&gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
/// </code>
///
/// For an </c>2-D<c> tensor </c>x<c> with </c>axis = 0<c>:
/// For an <c>2-D</c> tensor <c>x</c> with <c>axis = 0</c>:
///
/// <code>
/// # tensor 'x' is [[1, 0, 0],
Expand All @@ -38132,7 +38132,7 @@ public static (Tensor y, Tensor idx) unique (Tensor x, TF_DataType? out_idx = nu
/// idx ==&gt; [0, 0, 1]
/// </code>
///
/// For an </c>2-D<c> tensor </c>x<c> with </c>axis = 1<c>:
/// For an <c>2-D</c> tensor <c>x</c> with <c>axis = 1</c>:
///
/// <code>
/// # tensor 'x' is [[1, 0, 0],
Expand Down Expand Up @@ -38241,9 +38241,9 @@ public static (Tensor y, Tensor idx, Tensor count) unique_with_counts (Tensor x,
/// that are the same size as the number of the elements in <c>x</c> along the
/// <c>axis</c> dimension. The <c>idx</c> contains the index in the unique output <c>y</c>
/// and the <c>count</c> contains the count in the unique output <c>y</c>.
/// In other words, for an <c>1-D</c> tensor <c>x</c> with <c>axis = None:
/// In other words, for an <c>1-D</c> tensor <c>x</c> with <c>axis = None</c>:
///
/// </c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]<c>
/// <c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]</c>
///
/// For example:
///
Expand All @@ -38255,7 +38255,7 @@ public static (Tensor y, Tensor idx, Tensor count) unique_with_counts (Tensor x,
/// count ==&gt; [2, 1, 3, 1, 2]
/// </code>
///
/// For an </c>2-D<c> tensor </c>x<c> with </c>axis = 0<c>:
/// For an <c>2-D</c> tensor <c>x</c> with <c>axis = 0</c>:
///
/// <code>
/// # tensor 'x' is [[1, 0, 0],
Expand All @@ -38268,7 +38268,7 @@ public static (Tensor y, Tensor idx, Tensor count) unique_with_counts (Tensor x,
/// count ==&gt; [2, 1]
/// </code>
///
/// For an </c>2-D<c> tensor </c>x<c> with </c>axis = 1<c>:
/// For an <c>2-D</c> tensor <c>x</c> with <c>axis = 1</c>:
///
/// <code>
/// # tensor 'x' is [[1, 0, 0],
Expand Down
4 changes: 2 additions & 2 deletions src/TensorFlowNET.Core/Operations/math_ops.cs
Original file line number Diff line number Diff line change
Expand Up @@ -406,10 +406,10 @@ public static Tensor realdiv(Tensor x, Tensor y, string name = null)
/// Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
/// entry in `axis`. If `keepdims` is true, the reduced dimensions
/// are retained with length 1.

///
/// If `axis` has no entries, all dimensions are reduced, and a
/// tensor with a single element is returned.

///
/// This function is more numerically stable than log(sum(exp(input))). It avoids
/// overflows caused by taking the exp of large inputs and underflows caused by
/// taking the log of small inputs.
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Status/c_api.status.cs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ public partial class c_api
public static extern SafeStatusHandle TF_NewStatus();

/// <summary>
/// Record <code, msg> in *s. Any previous information is lost.
/// Record &lt;code, msg> in *s. Any previous information is lost.
/// A common use is to clear a status: TF_SetStatus(s, TF_OK, "");
/// </summary>
/// <param name="s"></param>
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Summaries/Summary.cs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ public Tensor scalar(string name, Tensor tensor, string[] collections = null, st
/// <summary>
/// Adds keys to a collection.
/// </summary>
/// <param name="val"The value to add per each key.></param>
/// <param name="val">The value to add per each key.</param>
/// <param name="collections">A collection of keys to add.</param>
/// <param name="default_collections">Used if collections is None.</param>
public void collect(ITensorOrOperation val, List<string> collections, List<string> default_collections)
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ public partial class c_api

/// <summary>
/// Return the length of the tensor in the "dim_index" dimension.
/// REQUIRES: 0 <= dim_index < TF_NumDims(tensor)
/// REQUIRES: 0 &lt;= dim_index &lt; TF_NumDims(tensor)
/// </summary>
/// <param name="tensor"></param>
/// <param name="dim_index"></param>
Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/ops.cs
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ public static Tensor internal_convert_to_tensor_or_composite(Tensor value, TF_Da
/// Wrapper for `Graph.control_dependencies()` using the default graph.
///
/// See `tf.Graph.control_dependencies` for more details.

///
/// When eager execution is enabled, any callable object in the `control_inputs`
/// list will be called.
/// </summary>
Expand Down

0 comments on commit 7884b24

Please sign in to comment.