Skip to content

Commit

Permalink
fix array_ops.squeeze
Browse files Browse the repository at this point in the history
  • Loading branch information
Oceania2018 committed Jan 8, 2022
1 parent 22d362e commit 1bc6988
Show file tree
Hide file tree
Showing 7 changed files with 26 additions and 93 deletions.
2 changes: 1 addition & 1 deletion .github/FUNDING.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: ['https://bit.ly/2op1mu5']# Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
custom: [] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
4 changes: 0 additions & 4 deletions src/TensorFlowNET.Core/Gradients/Tape.ComputeGradient.cs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ public Tensor[] ComputeGradient(Tensor[] target_tensor_ids,
tensor_tape_,
state.op_tape);

int gcCollectFlag = 0;
while (!op_stack.empty())
{
var op = op_stack.Dequeue();
Expand Down Expand Up @@ -155,9 +154,6 @@ public Tensor[] ComputeGradient(Tensor[] target_tensor_ids,
op_stack.Enqueue(op_id);
}
}

if (gcCollectFlag++ % 10 == 0)
GC.Collect();
}

if (state.op_tape.Count > 0)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,19 +90,20 @@ public Tensor Apply(Tensors input, Tensor filters)
strides.Insert(0, 1);
dilations.Insert(0, 1);
var expanded = tf.expand_dims(input, spatial_start_dim);
input = array_ops.expand_dims(input, spatial_start_dim);
filters = array_ops.expand_dims(filters, 0);
result = gen_nn_ops.conv2d(new Conv2dParams
{
Input = expanded,
Input = input,
Filter = filters,
Strides = strides.ToArray(),
Padding = padding,
DataFormat = channel_first ? "NCHW" : "NHWC",
Dilations = dilations.ToArray(),
Name = name
});
result = tf.squeeze(result, squeeze_dims: spatial_start_dim);
result = array_ops.squeeze(result, new[] { spatial_start_dim });
}
});

Expand Down
2 changes: 1 addition & 1 deletion src/TensorFlowNET.Core/Operations/array_ops.cs
Original file line number Diff line number Diff line change
Expand Up @@ -730,7 +730,7 @@ public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end,
/// <returns>A `Tensor`. Has the same type as `input`.
/// Contains the same data as `input`, but has one or more dimensions of
/// size 1 removed.</returns>
public static Tensor squeeze(Tensor input, int[] axis = null, string name = null, int[] squeeze_dims = null)
public static Tensor squeeze(Tensor input, int[] axis = null, string name = null)
=> gen_array_ops.squeeze(input, axis, name);

public static Tensor identity(Tensor input, string name = null)
Expand Down
6 changes: 3 additions & 3 deletions src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,11 @@ public class Convolutional : Layer
public Convolutional(ConvolutionalArgs args) : base(args)
{
this.args = args;
args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.dims.Select(x => (int)x).ToArray(), args.Rank, "kernel_size");
args.Strides = conv_utils.normalize_tuple(args.Strides.dims.Select(x => (int)x).ToArray(), args.Rank, "strides");
args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.as_int_list(), args.Rank, "kernel_size");
args.Strides = conv_utils.normalize_tuple(args.Strides.as_int_list(), args.Rank, "strides");
args.Padding = conv_utils.normalize_padding(args.Padding);
args.DataFormat = conv_utils.normalize_data_format(args.DataFormat);
args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.dims.Select(x => (int)x).ToArray(), args.Rank, "dilation_rate");
args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.as_int_list(), args.Rank, "dilation_rate");
inputSpec = new InputSpec(ndim: rank + 2);
_tf_data_format = conv_utils.convert_data_format(data_format, rank + 2);
}
Expand Down
63 changes: 2 additions & 61 deletions src/TensorFlowNET.Keras/Layers/LayersApi.cs
Original file line number Diff line number Diff line change
Expand Up @@ -68,64 +68,6 @@ public BatchNormalization BatchNormalization(int axis = -1,
Name = name
});

/// <summary>
/// 1D convolution layer (e.g. temporal convolution).
/// This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If use_bias is True, a bias vector is created and added to the outputs. Finally, if activation is not None, it is applied to the outputs as well.
/// </summary>
/// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution)</param>
/// <param name="kernel_size">An integer specifying the width of the 1D convolution window.</param>
/// <param name="strides">An integer specifying the stride of the convolution window. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
/// <param name="padding">one of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input.</param>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last.</param>
/// <param name="dilation_rate">An integer specifying the dilation rate to use for dilated convolution. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
/// <param name="groups">A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups results along the channel axis. Input channels and filters must both be divisible by groups.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix (see keras.initializers).</param>
/// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
/// <param name="kernel_regularizer">Regularizer function applied to the kernel weights matrix (see keras.regularizers).</param>
/// <param name="bias_regularizer">Regularizer function applied to the bias vector (see keras.regularizers).</param>
/// <param name="activity_regularizer">Regularizer function applied to the output of the layer (its "activation") (see keras.regularizers).</param>
/// <returns>A tensor of rank 3 representing activation(conv1d(inputs, kernel) + bias).</returns>
public Conv1D Conv1D(int filters,
int? kernel_size = null,
int? strides = null,
string padding = "valid",
string data_format = null,
int? dilation_rate = null,
int groups = 1,
Activation activation = null,
bool use_bias = true,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null,
IRegularizer kernel_regularizer = null,
IRegularizer bias_regularizer = null,
IRegularizer activity_regularizer = null)
{
// Special case: Conv1D will be implemented as Conv2D with H=1, so we need to add a 1-sized dimension to the kernel.
// Lower-level logic handles the stride and dilation_rate, but the kernel_size needs to be set properly here.

var kernel = (kernel_size == null) ? (1, 5) : (1, kernel_size.Value);
return new Conv1D(new Conv1DArgs
{
Rank = 1,
Filters = filters,
KernelSize = kernel,
Strides = strides == null ? 1 : strides,
Padding = padding,
DataFormat = data_format,
DilationRate = dilation_rate == null ? 1 : dilation_rate,
Groups = groups,
UseBias = use_bias,
KernelInitializer = kernel_initializer == null ? tf.glorot_uniform_initializer : kernel_initializer,
BiasInitializer = bias_initializer == null ? tf.zeros_initializer : bias_initializer,
KernelRegularizer = kernel_regularizer,
BiasRegularizer = bias_regularizer,
ActivityRegularizer = activity_regularizer,
Activation = activation ?? keras.activations.Linear
});
}

/// <summary>
/// 1D convolution layer (e.g. temporal convolution).
/// This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If use_bias is True, a bias vector is created and added to the outputs. Finally, if activation is not None, it is applied to the outputs as well.
Expand All @@ -143,7 +85,7 @@ public Conv1D Conv1D(int filters,
/// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
/// <returns>A tensor of rank 3 representing activation(conv1d(inputs, kernel) + bias).</returns>
public Conv1D Conv1D(int filters,
int? kernel_size = null,
Shape? kernel_size = null,
int? strides = null,
string padding = "valid",
string data_format = null,
Expand All @@ -157,12 +99,11 @@ public Conv1D Conv1D(int filters,
// Special case: Conv1D will be implemented as Conv2D with H=1, so we need to add a 1-sized dimension to the kernel.
// Lower-level logic handles the stride and dilation_rate, but the kernel_size needs to be set properly here.

var kernel = (kernel_size == null) ? (1, 5) : (1, kernel_size.Value);
return new Conv1D(new Conv1DArgs
{
Rank = 1,
Filters = filters,
KernelSize = kernel,
KernelSize = kernel_size ?? new Shape(1, 5),
Strides = strides == null ? 1 : strides,
Padding = padding,
DataFormat = data_format,
Expand Down
35 changes: 15 additions & 20 deletions src/TensorFlowNET.Keras/Saving/hdf5_format.cs
Original file line number Diff line number Diff line change
Expand Up @@ -210,19 +210,19 @@ public static void save_weights_to_hdf5_group(long f, List<ILayer> layers)
}
}

private static void save_attributes_to_hdf5_group(long f,string name ,Array data)
private static void save_attributes_to_hdf5_group(long f, string name, Array data)
{
int num_chunks = 1;

var chunked_data = Split(data, num_chunks);
int getSize= 0;
string getType = data.Length>0?data.GetValue(0).GetType().Name.ToLower():"string";
int getSize = 0;

string getType = data.Length > 0 ? data.GetValue(0).GetType().Name.ToLower() : "string";

switch (getType)
{
case "single":
getSize=sizeof(float);
getSize = sizeof(float);
break;
case "double":
getSize = sizeof(double);
Expand All @@ -237,30 +237,25 @@ private static void save_attributes_to_hdf5_group(long f,string name ,Array data
getSize = sizeof(long);
break;
default:
getSize=-1;
getSize = -1;
break;
}
int getCount = chunked_data.Count;

if (getSize != -1) {
num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / (double)HDF5_OBJECT_HEADER_LIMIT);

if (getSize != -1)
{
num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT);
if (num_chunks > 1) chunked_data = Split(data, num_chunks);
}

if (num_chunks > 1)
{
foreach (var (chunk_id, chunk_data) in enumerate(chunked_data))
{

WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray());

}

}
else {

WriteAttrs(f, getType,name, data);

else
{
WriteAttrs(f, getType, name, data);
}
}

Expand Down

0 comments on commit 1bc6988

Please sign in to comment.