convolution padding same fix
RubyAM committed Jan 20, 2025
1 parent 8be150e commit e3c55ca
Showing 4 changed files with 40 additions and 56 deletions.
20 changes: 10 additions & 10 deletions examples/mnist/main.cpp
@@ -27,35 +27,35 @@ int main()

const Index samples_number = 3;

-const Index image_height = 2;
-const Index image_width = 2;
-const Index channels = 1;
+const Index image_height = 3;
+const Index image_width = 3;
+const Index channels = 3;
const Index targets = 3;

-ImageDataSet image_data_set(samples_number, {image_height, image_width, channels}, {targets});
+//ImageDataSet image_data_set(samples_number, {image_height, image_width, channels}, {targets});

-image_data_set.set_data_random();
+//image_data_set.set_data_random();

-//ImageDataSet image_data_set(0,{0,0,0},{0});
+ImageDataSet image_data_set(0,{0,0,0},{0});

//image_data_set.set_data_path("data");
//image_data_set.set_data_path("C:/mnist/train");
//image_data_set.set_data_path("C:/binary_mnist");
image_data_set.set_data_path("C:/binary_mnist");
//image_data_set.set_data_path("C:/Users/Roberto Lopez/Documents/opennn/examples/mnist/data");
//image_data_set.set_data_path("C:/melanoma_dataset_bmp");
//image_data_set.set_data_path("C:/melanoma_dataset_bmp_small");
//image_data_set.set_data_path("C:/melanoma_supersmall");
//image_data_set.set_input_dimensions({24,24,1});

-//image_data_set.read_bmp();
+image_data_set.read_bmp();

-image_data_set.set(DataSet::SampleUse::Training);
+//image_data_set.set(DataSet::SampleUse::Training);

// Neural network

NeuralNetwork neural_network(NeuralNetwork::ModelType::ImageClassification,
image_data_set.get_dimensions(DataSet::VariableUse::Input),
-{ 1 },
+{ 8 },
image_data_set.get_dimensions(DataSet::VariableUse::Target));

// Training strategy
43 changes: 17 additions & 26 deletions opennn/convolutional_layer.cpp
@@ -205,7 +205,7 @@ void ConvolutionalLayer::forward_propagate(const vector<pair<type*, dimensions>>
Tensor<type, 4>& activation_derivatives = convolutional_layer_forward_propagation->activation_derivatives;

preprocess_inputs(inputs, preprocessed_inputs);

calculate_convolutions(preprocessed_inputs, outputs);

if(batch_normalization)
@@ -230,8 +230,8 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i
unique_ptr<LayerForwardPropagation>& forward_propagation,
unique_ptr<LayerBackPropagation>& back_propagation) const
{
cout << "Calculando tiempo convolution backward..." << endl;
auto start = chrono::high_resolution_clock::now();
//cout << "Calculando tiempo convolution backward..." << endl;
//auto start = chrono::high_resolution_clock::now();
// Convolutional layer

const Index batch_samples_number = back_propagation->batch_samples_number;
@@ -256,6 +256,8 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i
ConvolutionalLayerForwardPropagation* convolutional_layer_forward_propagation =
static_cast<ConvolutionalLayerForwardPropagation*>(forward_propagation.get());

+Tensor<type, 4>& preprocessed_inputs = convolutional_layer_forward_propagation->preprocessed_inputs;
+
const Tensor<type, 4>& activation_derivatives = convolutional_layer_forward_propagation->activation_derivatives;

// Back propagation
@@ -292,10 +294,9 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i
const Eigen::array<pair<Index, Index>, 2> paddings
= { make_pair(pad_top, pad_bottom), make_pair(pad_left, pad_right) };

cout << "Inputs: \n" << inputs << endl;
cout << "Deltas: \n" << deltas << endl;

cout << "synaptic weigths:\n" << synaptic_weights << endl;
// Inputs

preprocess_inputs(inputs, preprocessed_inputs);

// Convolutions derivatives

@@ -307,7 +308,7 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i

// Synaptic weigth derivatives

-//#pragma omp parallel for
+#pragma omp parallel for
for (Index kernel_index = 0; kernel_index < kernels_number; kernel_index++)
{
const TensorMap<Tensor<type, 3>> kernel_convolutions_derivatives(
@@ -323,18 +324,8 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i
kernel_width,
kernel_channels);

-kernel_synaptic_weights_derivatives = inputs.convolve(kernel_convolutions_derivatives, convolutions_dimensions_3d);
+kernel_synaptic_weights_derivatives = preprocessed_inputs.convolve(kernel_convolutions_derivatives, convolutions_dimensions_3d);
}

cout << "synaptic_weight_derivatives:\n" << convolutional_layer_back_propagation->synaptic_weight_derivatives << endl;
cout << "synaptic_weight dimension[0]:" << synaptic_weights.dimension(0) << endl;
cout << "synaptic_weight dimension[1]:" << synaptic_weights.dimension(1) << endl;
cout << "synaptic_weight dimension[2]:" << synaptic_weights.dimension(2) << endl;
cout << "synaptic_weight dimension[3]:" << synaptic_weights.dimension(3) << endl;
cout << "synaptic_weight_derivatives dimension[0]:" << convolutional_layer_back_propagation->synaptic_weight_derivatives.dimension(0) << endl;
cout << "synaptic_weight_derivatives dimension[1]:" << convolutional_layer_back_propagation->synaptic_weight_derivatives.dimension(1) << endl;
cout << "synaptic_weight_derivatives dimension[2]:" << convolutional_layer_back_propagation->synaptic_weight_derivatives.dimension(2) << endl;
cout << "synaptic_weight_derivatives dimension[3]:" << convolutional_layer_back_propagation->synaptic_weight_derivatives.dimension(3) << endl;

// Input derivatives

@@ -376,12 +367,12 @@ void ConvolutionalLayer::back_propagate(const vector<pair<type*, dimensions>>& i
}
}

-auto end = chrono::high_resolution_clock::now();
-auto duration = chrono::duration_cast<chrono::milliseconds>(end - start);
-cout << "Tiempo convolution back propagate: "
-<< duration.count() / 1000 << "::"
-<< duration.count() % 1000
-<< " segundos::milisegundos" << endl;
+//auto end = chrono::high_resolution_clock::now();
+//auto duration = chrono::duration_cast<chrono::milliseconds>(end - start);
+//cout << "Tiempo convolution back propagate: "
+// << duration.count() / 1000 << "::"
+// << duration.count() % 1000
+// << " segundos::milisegundos" << endl;
}
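
The two changes above are the heart of the fix: back propagation now builds the padded inputs with preprocess_inputs, exactly as forward propagation does, and convolves those instead of the raw inputs when accumulating the synaptic weight derivatives. Under "same" padding this is what makes the gradient come out kernel-sized. A minimal standalone sketch of the shape argument, written directly against Eigen's unsupported Tensor module (2-D, one channel, stride 1; the variable names are illustrative, not OpenNN's):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>
    #include <utility>

    int main()
    {
        using Eigen::Index;

        const Index height = 5, width = 5, kernel = 3;
        const Index pad = (kernel - 1) / 2;             // "same" padding for stride 1

        Eigen::Tensor<float, 2> inputs(height, width);
        Eigen::Tensor<float, 2> deltas(height, width);  // "same" keeps outputs the size of inputs
        inputs.setRandom();
        deltas.setRandom();

        // Pad the inputs exactly as forward propagation did.
        const Eigen::array<std::pair<Index, Index>, 2> paddings
            = { std::make_pair(pad, pad), std::make_pair(pad, pad) };
        const Eigen::Tensor<float, 2> padded = inputs.pad(paddings);   // 7 x 7

        // dL/dW: a valid convolution of the padded inputs with the deltas
        // yields a kernel-sized gradient.
        const Eigen::array<Index, 2> dims = { 0, 1 };
        const Eigen::Tensor<float, 2> weight_derivatives = padded.convolve(deltas, dims);

        std::cout << weight_derivatives.dimension(0) << " x "
                  << weight_derivatives.dimension(1) << std::endl;     // 3 x 3
    }

Convolving the raw 5 x 5 inputs with the 5 x 5 deltas would instead return a 1 x 1 tensor, which is exactly the shape mismatch this commit removes.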


@@ -818,7 +809,7 @@ pair<Index, Index> ConvolutionalLayer::get_padding() const
const Index kernel_width = get_kernel_width();

const Index row_stride = get_row_stride();
-//const Index column_stride = get_column_stride();
+const Index column_stride = get_column_stride();

const Index pad_rows = std::max<Index>(0, ((static_cast<float>(input_height) / row_stride) - 1) * row_stride + kernel_height - input_height) / 2;
const Index pad_columns = std::max<Index>(0, ((static_cast<float>(input_width) / column_stride) - 1) * column_stride + kernel_width - input_width) / 2;
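
Restoring column_stride matters because the pad_columns expression above divides by it. For stride 1 the whole formula collapses to (kernel - 1) / 2 per side, so the 3 x 3 kernels enabled elsewhere in this commit pad by one pixel and preserve the spatial size. An integer sketch of the same arithmetic (same_padding is a hypothetical helper, not OpenNN's API):

    #include <algorithm>
    #include <iostream>

    // "Same" padding rule: enough padding that, at the given stride,
    // the output spatial size matches the input's.
    long same_padding(long input, long kernel, long stride)
    {
        const long needed = (input / stride - 1) * stride + kernel - input;
        return std::max(0L, needed) / 2;
    }

    int main()
    {
        std::cout << same_padding(28, 3, 1) << std::endl;   // 1: a 3x3 kernel keeps 28x28
        std::cout << same_padding(28, 5, 1) << std::endl;   // 2: a 5x5 kernel keeps 28x28
    }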
7 changes: 0 additions & 7 deletions opennn/cross_entropy_error.cpp
@@ -82,13 +82,6 @@ void CrossEntropyError::calculate_multiple_error(const Batch& batch,

// Back propagation

-//const Index layers_number = back_propagation.neural_network.layers.size();
-
-//ProbabilisticLayerBackPropagation* probabilistic_layer_back_propagation =
-// static_cast<ProbabilisticLayerBackPropagation*>(back_propagation.neural_network.layers[layers_number - 1].get());
-
-//probabilistic_layer_back_propagation->targets = targets;
-
Tensor<type, 0>& error = back_propagation.error;

error.device(*thread_pool_device) = (targets*outputs.log()).sum() / type(-batch_samples_number);
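
The line kept above is the standard categorical cross-entropy averaged over the batch, with t the targets, y the outputs and N the number of batch samples:

    E = -\frac{1}{N} \sum_{n=1}^{N} \sum_{c} t_{n,c} \, \log y_{n,c}

The deleted block only stashed the targets on the probabilistic layer's back propagation, a hand-off the error computation itself does not use.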
26 changes: 13 additions & 13 deletions opennn/neural_network.cpp
@@ -439,26 +439,26 @@ void NeuralNetwork::set_image_classification(const dimensions& input_dimensions,
{
const dimensions kernel_dimensions = { 3, 3, get_output_dimensions()[2], complexity_dimensions[i] };
const dimensions stride_dimensions = { 1, 1 };
-const ConvolutionalLayer::ConvolutionType convolution_type = ConvolutionalLayer::ConvolutionType::Valid;
+const ConvolutionalLayer::ConvolutionType convolution_type = ConvolutionalLayer::ConvolutionType::Same;

-//add_layer(make_unique<ConvolutionalLayer>(get_output_dimensions(),
-// kernel_dimensions,
-// ConvolutionalLayer::ActivationFunction::RectifiedLinear,
-// stride_dimensions,
-// convolution_type,
-// "convolutional_layer_" + to_string(i+1)));
+add_layer(make_unique<ConvolutionalLayer>(get_output_dimensions(),
+kernel_dimensions,
+ConvolutionalLayer::ActivationFunction::RectifiedLinear,
+stride_dimensions,
+convolution_type,
+"convolutional_layer_" + to_string(i+1)));

const dimensions pool_dimensions = { 2, 2 };
const dimensions pooling_stride_dimensions = { 2, 2 };
const dimensions padding_dimensions = { 0, 0 };
const PoolingLayer::PoolingMethod pooling_method = PoolingLayer::PoolingMethod::MaxPooling;

-//add_layer(make_unique<PoolingLayer>(get_output_dimensions(),
-// pool_dimensions,
-// pooling_stride_dimensions,
-// padding_dimensions,
-// pooling_method,
-// "pooling_layer_" + to_string(i + 1)));
+add_layer(make_unique<PoolingLayer>(get_output_dimensions(),
+pool_dimensions,
+pooling_stride_dimensions,
+padding_dimensions,
+pooling_method,
+"pooling_layer_" + to_string(i + 1)));
}

add_layer(make_unique<FlattenLayer>(get_output_dimensions()));
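
With both add_layer calls active, each complexity entry now contributes a "same" 3 x 3 convolution (spatial size preserved, depth set by the entry) followed by 2 x 2 max pooling with stride 2 (spatial size halved), before the final flatten. A sketch of the resulting flatten width, assuming a 28 x 28 x 1 input and the { 8 } complexity from the mnist example (the input size is an assumption, not fixed by this code):

    #include <iostream>
    #include <vector>

    int main()
    {
        long height = 28, width = 28, channels = 1;   // assumed input dimensions
        const std::vector<long> complexity = { 8 };   // kernels per block, as in the mnist example

        for (const long kernels : complexity)
        {
            channels = kernels;   // "same" convolution keeps height x width, sets the depth
            height /= 2;          // 2x2 max pooling with stride 2 halves each spatial dimension
            width /= 2;
        }

        std::cout << "flatten outputs: " << height * width * channels << std::endl;   // 14 * 14 * 8 = 1568
    }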
