From 9fa003ba20712bc70e7218e4df43b082878e138a Mon Sep 17 00:00:00 2001
From: RoberLopez
Date: Sat, 28 Dec 2024 14:20:15 +0100
Subject: [PATCH] clean

---
 examples/airfoil_self_noise/main.cpp  |   8 +-
 opennn/adaptive_moment_estimation.cpp |  12 +-
 opennn/batch.cpp                      |  98 ++++-------
 opennn/batch.h                        |   1 -
 opennn/data_set.cpp                   |  30 ++--
 opennn/data_set.h                     |   7 +-
 opennn/neural_network.cpp             |   8 +-
 opennn/tensors.cpp                    | 241 +-------------------------
 tests/correlations_test.cpp           |   1 +
 tests/neural_network_test.cpp         |  60 +++----
 tests/perceptron_layer_test.cpp       | 164 ++----------------
 11 files changed, 119 insertions(+), 511 deletions(-)

diff --git a/examples/airfoil_self_noise/main.cpp b/examples/airfoil_self_noise/main.cpp
index 7d82162dd..0a4113340 100644
--- a/examples/airfoil_self_noise/main.cpp
+++ b/examples/airfoil_self_noise/main.cpp
@@ -25,11 +25,16 @@ int main()
     // Data set

     DataSet data_set("C:/airfoil_self_noise.csv", ";", true);
-
+
     const Index input_variables_number = data_set.get_variables_number(DataSet::VariableUse::Input);
     const Index target_variables_number = data_set.get_variables_number(DataSet::VariableUse::Target);

     data_set.set(DataSet::SampleUse::Training);
+
+    //data_set.print_input_target_raw_variables_correlations();
+
+    //data_set.save("../opennn/examples/airfoil_self_noise/data/neural_network.xml");
+    //data_set.load("../opennn/examples/airfoil_self_noise/data/neural_network.xml");

     // Neural network

@@ -45,7 +50,6 @@ int main()

     TrainingStrategy training_strategy(&neural_network, &data_set);
-
     // training_strategy.set_display(false);

     //training_strategy.print();

diff --git a/opennn/adaptive_moment_estimation.cpp b/opennn/adaptive_moment_estimation.cpp
index ef9196f45..fc98c850f 100644
--- a/opennn/adaptive_moment_estimation.cpp
+++ b/opennn/adaptive_moment_estimation.cpp
@@ -253,18 +253,19 @@ TrainingResults AdaptiveMomentEstimation::perform_training()
             //cout << "Iteration " << iteration << "/" << training_batches_number << endl;

             // Data set

             training_batch.fill(training_batches[iteration],
                                 input_variable_indices,
-                                target_variable_indices,
-                                decoder_variable_indices);
-/*
+                                decoder_variable_indices,
+                                target_variable_indices);
+
+
             // Neural network

             neural_network->forward_propagate(training_batch.get_input_pairs(),
                                               training_forward_propagation,
                                               is_training);
-
+
             // Loss index

             loss_index->back_propagate(training_batch,
@@ -289,7 +290,6 @@ TrainingResults AdaptiveMomentEstimation::perform_training()

             //if(display && epoch % display_period == 0)
             //    display_progress_bar(iteration, training_batches_number - 1);
-*/
         }

         // Loss
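Note on the re-enabled block above: one Adam iteration is a fill / forward_propagate / back_propagate / update cycle, and this hunk also fixes the argument order of Batch::fill (inputs, then decoder indices, then targets). A schematic of the loop using the names from the hunk — not compilable on its own, and the update call at the end is a placeholder, not part of the patch:

// Sketch of one training epoch in AdaptiveMomentEstimation::perform_training().
// Assumes training_batches, training_batch, neural_network, loss_index and
// training_back_propagation exist as in the surrounding function.
for(Index iteration = 0; iteration < training_batches_number; iteration++)
{
    // Gather this mini-batch; note the corrected order:
    // inputs, then decoder variables, then targets.
    training_batch.fill(training_batches[iteration],
                        input_variable_indices,
                        decoder_variable_indices,
                        target_variable_indices);

    // Forward pass on the batch.
    neural_network->forward_propagate(training_batch.get_input_pairs(),
                                      training_forward_propagation,
                                      is_training);

    // Loss gradient for the batch.
    loss_index->back_propagate(training_batch,
                               training_forward_propagation,
                               training_back_propagation);

    // Adam parameter update would follow here (placeholder, hypothetical name):
    // update_parameters(training_back_propagation);
}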
diff --git a/opennn/batch.cpp b/opennn/batch.cpp
index 57e6784ca..8aaadba08 100644
--- a/opennn/batch.cpp
+++ b/opennn/batch.cpp
@@ -10,7 +10,6 @@
 #include "tensors.h"
 #include "image_data_set.h"
 #include "images.h"
-//#include "language_data_set.h"

 namespace opennn
 {
@@ -36,12 +35,12 @@ void Batch::fill(const vector<Index>& sample_indices,
     }
     else
     {
-        //fill_tensor_data(data, sample_indices, input_indices, input_tensor.data());
+        fill_tensor_data(data, sample_indices, input_indices, input_tensor.data());
     }

-    //fill_tensor_data(data, sample_indices, decoder_indices, decoder_tensor.data());
+    fill_tensor_data(data, sample_indices, decoder_indices, decoder_tensor.data());

-    //fill_tensor_data(data, sample_indices, target_indices, target_tensor.data());
+    fill_tensor_data(data, sample_indices, target_indices, target_tensor.data());
 }

@@ -122,67 +121,42 @@ Batch::Batch(const Index& new_samples_number, DataSet* new_data_set)
 }


-void Batch::set(const Index& new_batch_size,
-                DataSet* new_data_set)
+void Batch::set(const Index& new_samples_number, DataSet* new_data_set)
 {
     if (!new_data_set) return;

-    samples_number = new_batch_size;
+    samples_number = new_samples_number;

     data_set = new_data_set;

     const dimensions& data_set_input_dimensions = data_set->get_input_dimensions();
     const dimensions& data_set_decoder_dimensions = data_set->get_decoder_dimensions();
     const dimensions& data_set_target_dimensions = data_set->get_target_dimensions();

-    // Inputs
-
-    if(data_set_input_dimensions.size() == 2)
-    {
-        const Index rows_number = data_set_input_dimensions[0];
-        const Index columns_number = data_set_input_dimensions[1];
-
-        input_dimensions = {{samples_number, rows_number, columns_number}};
-        input_tensor.resize(samples_number*rows_number*columns_number);
-    }
-    else if(data_set_input_dimensions.size() == 3)
+    if (!data_set_input_dimensions.empty())
     {
-        const Index rows_number = data_set_input_dimensions[0];
-        const Index columns_number = data_set_input_dimensions[1];
-        const Index channels = data_set_input_dimensions[2];
+        input_dimensions = { samples_number };
+        input_dimensions.insert(input_dimensions.end(), data_set_input_dimensions.begin(), data_set_input_dimensions.end());

-        input_dimensions = {{samples_number, rows_number, columns_number, channels}};
-        input_tensor.resize(samples_number*channels*rows_number*columns_number);
+        const Index input_size = accumulate(input_dimensions.begin(), input_dimensions.end(), 1, multiplies<Index>());
+        input_tensor.resize(input_size);
     }

-    // Decoder
-
-    if(data_set_decoder_dimensions.size() == 2)
+    if (!data_set_decoder_dimensions.empty())
     {
-        const Index rows_number = data_set_input_dimensions[0];
-        const Index columns_number = data_set_input_dimensions[1];
+        decoder_dimensions = { samples_number };
+        decoder_dimensions.insert(decoder_dimensions.end(), data_set_decoder_dimensions.begin(), data_set_decoder_dimensions.end());

-        decoder_dimensions = {{samples_number, rows_number, columns_number}};
-        decoder_tensor.resize(samples_number*rows_number*columns_number);
+        const Index decoder_size = accumulate(decoder_dimensions.begin(), decoder_dimensions.end(), 1, multiplies<Index>());
+        decoder_tensor.resize(decoder_size);
     }

-    // Target
-
-    if(data_set_target_dimensions.size() == 2)
+    if (!data_set_target_dimensions.empty())
     {
-        const Index rows_number = data_set_target_dimensions[0];
-        const Index columns_number = data_set_target_dimensions[1];
+        target_dimensions = { samples_number };
+        target_dimensions.insert(target_dimensions.end(), data_set_target_dimensions.begin(), data_set_target_dimensions.end());

-        target_dimensions = {{samples_number, rows_number, columns_number}};
-        target_tensor.resize(samples_number*rows_number*columns_number);
-    }
-    else if(data_set_target_dimensions.size() == 3)
-    {
-        const Index rows_number = data_set_target_dimensions[0];
-        const Index columns_number = data_set_target_dimensions[1];
-        const Index channels = data_set_target_dimensions[2];
-
-        target_dimensions = {{samples_number, rows_number, columns_number, channels}};
-
-        target_tensor.resize(samples_number*channels*rows_number*columns_number);
+        const Index target_size = accumulate(target_dimensions.begin(), target_dimensions.end(), 1, multiplies<Index>());
+        target_tensor.resize(target_size);
     }
 }
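The rewritten Batch::set replaces the per-rank branches with one generic rule: prepend samples_number to whatever shape the data set reports, then allocate the product of all extents. A minimal self-contained sketch of that idiom (names are illustrative, not the OpenNN API):

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

using Index = long long;
using dimensions = std::vector<Index>;

// Prepend the batch size to a data-set shape, as Batch::set now does.
dimensions make_batch_dimensions(Index samples_number, const dimensions& data_dimensions)
{
    dimensions batch_dimensions = { samples_number };
    batch_dimensions.insert(batch_dimensions.end(), data_dimensions.begin(), data_dimensions.end());
    return batch_dimensions;
}

// Flat element count via the same accumulate/multiplies pattern.
Index element_count(const dimensions& dims)
{
    return std::accumulate(dims.begin(), dims.end(), Index(1), std::multiplies<Index>());
}

int main()
{
    // Image-style input {height, width, channels} = {28, 28, 1}, batch of 32.
    const dimensions batch = make_batch_dimensions(32, {28, 28, 1});

    assert(batch.size() == 4);              // {32, 28, 28, 1}
    assert(element_count(batch) == 25088);  // 32 * 28 * 28 * 1
}

One consequence worth noting: any rank the data set reports now works, which is why the separate 2-D and 3-D cases above could be deleted.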
 Index Batch::get_samples_number() const
 {
     return samples_number;
 }


 void Batch::print() const
 {
-    const Index inputs_rank = input_dimensions.size();
-    const Index targets_rank = target_dimensions.size();
-
     cout << "Batch" << endl
          << "Inputs:" << endl
-         << "Inputs dimensions:" << endl;
+         << "Input dimensions:" << endl;
+
+    print_vector(input_dimensions);

-    for(Index i = 0; i < inputs_rank; i++)
-        cout << input_dimensions[i] << endl;
-
-    if(inputs_rank == 4)
+    /*
+    if(input_dimensions.size() == 4)
     {
         const TensorMap<Tensor<type, 4>> inputs((type*)input_tensor.data(),
                                                 input_dimensions[0],
                                                 input_dimensions[1],
                                                 input_dimensions[2],
                                                 input_dimensions[3]);

         cout << inputs << endl;
     }
+    */
+
+    cout << "Decoder:" << endl
+         << "Decoder dimensions:" << endl;
+
+    print_vector(decoder_dimensions);

     cout << "Targets:" << endl
-         << "Targets dimensions:" << endl;
+         << "Target dimensions:" << endl;
+
+    print_vector(target_dimensions);

-    for(Index i = 0; i < targets_rank; i++)
-        cout << target_dimensions[i] << endl;
+//    const TensorMap<Tensor<type, 2>> targets((type*)target_tensor.data(),
+//                                             target_dimensions[0],
+//                                             target_dimensions[1]);

-    const TensorMap<Tensor<type, 2>> targets((type*)target_tensor.data(),
-                                             target_dimensions[0],
-                                             target_dimensions[1]);
+//    cout << targets << endl;

-    cout << targets << endl;
 }

diff --git a/opennn/batch.h b/opennn/batch.h
index 2242183b9..858005457 100644
--- a/opennn/batch.h
+++ b/opennn/batch.h
@@ -42,7 +42,6 @@ struct Batch
     dimensions target_dimensions;
     Tensor<type, 1> target_tensor;

-    unique_ptr<ThreadPool> thread_pool;
     unique_ptr<ThreadPoolDevice> thread_pool_device;
 };
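Batch stores each tensor flat (rank 1) next to a dimensions vector; the commented-out printing code shows the intended reading: wrap the flat buffer in a TensorMap of matching rank. A minimal standalone sketch of that pairing, using Eigen's unsupported Tensor module directly rather than the Batch API:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>
#include <vector>

using type = float;
using Index = Eigen::Index;

int main()
{
    // Flat storage plus a shape vector, as in Batch.
    const std::vector<Index> dims = {2, 3};      // {samples, variables}

    Eigen::Tensor<type, 1> flat(dims[0] * dims[1]);
    flat.setConstant(type(1));

    // Zero-copy rank-2 view over the flat buffer, the same trick
    // Batch::print() uses with TensorMap before streaming the tensor.
    Eigen::TensorMap<Eigen::Tensor<type, 2>> view(flat.data(), dims[0], dims[1]);

    std::cout << view << std::endl;              // prints a 2x3 block of ones
}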
diff --git a/opennn/data_set.cpp b/opennn/data_set.cpp
index d50fb2503..5a8f3c3a4 100755
--- a/opennn/data_set.cpp
+++ b/opennn/data_set.cpp
@@ -1689,6 +1689,7 @@ void DataSet::set(const filesystem::path& new_data_path,
                   const bool& new_has_ids,
                   const DataSet::Codification& new_codification)
 {
+
     set_default();

     set_data_path(new_data_path);
@@ -1700,9 +1701,9 @@ void DataSet::set(const filesystem::path& new_data_path,
     set_has_ids(new_has_ids);

     set_codification(new_codification);
-
+
     read_csv();
-
+
     set_default_raw_variables_scalers();
     set_default_raw_variables_uses();

@@ -2980,11 +2981,11 @@ void DataSet::print() const
          << "Number of variables: " << variables_number << "\n"
          << "Number of input variables: " << input_variables_number << "\n"
          << "Number of target variables: " << target_variables_number << "\n"
-         << "Input variables dimensions: ";
+         << "Input dimensions: ";

     print_vector(get_input_dimensions());

-    cout << "Target variables dimensions: ";
+    cout << "Target dimensions: ";

     print_vector(get_target_dimensions());

@@ -3866,7 +3867,7 @@ void DataSet::read_csv()
         if(columns_number != 0) break;
     }
-
+
     const Index raw_variables_number = has_sample_ids ?
                                            columns_number - 1 : columns_number;

@@ -3891,7 +3892,7 @@ void DataSet::read_csv()
             samples_number++;
             set_default_raw_variables_names();
         }
-
+
     // Rest of lines

     while(getline(file, line))

@@ -3912,7 +3913,7 @@ void DataSet::read_csv()
         samples_number++;
     }
-
+
     for(Index i = 0; i < raw_variables_number; i++)
         if(raw_variables[i].type == RawVariableType::Categorical
         && raw_variables[i].get_categories_number() == 2)
            raw_variables[i].type = RawVariableType::Binary;

     sample_uses.resize(samples_number);
     sample_ids.resize(samples_number);
-
+
     // const Index variables_number = get_variables_number();
     const Index variables_number = columns_number;

     const vector<vector<Index>> all_variable_indices = get_variable_indices();
-
+
     data.resize(samples_number, variables_number);
     data.setZero();

     rows_missing_values_number = 0;
     missing_values_number = 0;
-
+
     raw_variables_missing_values_number.resize(raw_variables_number);
     raw_variables_missing_values_number.setZero();

     // Fill data
-
+
     file.clear();
     file.seekg(0);

@@ -4050,7 +4051,6 @@ void DataSet::read_csv()
         }

         sample_index++;
-
     }

     file.close();

@@ -4377,11 +4377,11 @@ void DataSet::fix_repeated_names()
 }


-vector<vector<Index>> DataSet::split_samples(const vector<Index>& sample_indices, const Index& new_batch_size) const
+vector<vector<Index>> DataSet::split_samples(const vector<Index>& sample_indices, const Index& new_samples_number) const
 {
     const Index samples_number = sample_indices.size();

-    Index batch_size = new_batch_size;
+    Index batch_size = new_samples_number;

     Index batches_number;

     if(samples_number < batch_size)
     {
         batches_number = 1;
         batch_size = samples_number;
     }
     else
     {
         batches_number = samples_number / batch_size;
     }

-//    const Index batches_number = (samples_number + new_batch_size - 1) / new_batch_size; // Round up division
+//    const Index batches_number = (samples_number + new_samples_number - 1) / new_samples_number; // Round up division

     vector<vector<Index>> batches(batches_number);

diff --git a/opennn/data_set.h b/opennn/data_set.h
index 22ec769d5..04b9115ea 100755
--- a/opennn/data_set.h
+++ b/opennn/data_set.h
@@ -230,7 +230,12 @@ class DataSet
     // Set

     void set(const Index& = 0, const dimensions& = {}, const dimensions& = {});
-    void set(const filesystem::path&, const string&, const bool& = true, const bool& = false, const DataSet::Codification& = Codification::UTF8);
+
+    void set(const filesystem::path&,
+             const string&,
+             const bool& = true,
+             const bool& = false,
+             const DataSet::Codification& = Codification::UTF8);

     void set(const filesystem::path&);
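Note on split_samples above: the active code floors the division, so a trailing partial batch is silently dropped, while the commented-out line shows the round-up alternative that would keep it. A quick standalone check of both policies:

#include <cassert>

using Index = long long;

int main()
{
    const Index samples_number = 10;
    const Index batch_size = 3;

    // Current policy in DataSet::split_samples: floor division,
    // so the trailing partial batch (sample 9 here) is not produced.
    const Index floor_batches = samples_number / batch_size;
    assert(floor_batches == 3);

    // Commented-out alternative in the same function: round-up division,
    // which would emit a fourth, smaller batch.
    const Index ceil_batches = (samples_number + batch_size - 1) / batch_size;
    assert(ceil_batches == 4);
}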
"convolutional_layer_" + to_string(i+1))); diff --git a/opennn/tensors.cpp b/opennn/tensors.cpp index e28023663..71b5d2f11 100644 --- a/opennn/tensors.cpp +++ b/opennn/tensors.cpp @@ -1,7 +1,7 @@ // OpenNN: Open Neural Networks Library // www.opennn.net // -// T E N S O R U T I L I T I E S S O U R C E +// T E N S O R S S O U R C E // // Artificial Intelligence Techniques, SL // artelnics@artelnics.com @@ -46,18 +46,6 @@ bool get_random_bool() } -//void multiply_rows(Tensor& matrix, const Tensor& vector) -//{ -// const Index rows_number = matrix.dimension(0); -// const Index columns_number = matrix.dimension(1); - -// #pragma omp parallel for -// for(Index i = 0; i < rows_number; i++) -// for(Index j = 0; j < columns_number; j++) -// matrix(i, j) *= vector(j); -//} - - void multiply_matrices(const ThreadPoolDevice* thread_pool_device, Tensor& tensor, const Tensor& vector) @@ -228,141 +216,6 @@ void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device, } -//void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device, -// const Tensor& A, -// const Tensor& B, -// Tensor& C, -// const Eigen::array, 1> contraction_axes) -//{ -// Assumes A, B & C share their last dimension. A & C also share their second-to-last dimension. -// A & B share one of their remaining 2 dimensions (the contraction axes). -// The other 2 dimensions of C will be the non-equal dimensions of A & B, in that order. -// By default contraction axes are (1, 0). - -// const Index A_rows = A.dimension(0); -// const Index A_columns = A.dimension(1); -// const Index B_rows = B.dimension(0); -// const Index B_columns = B.dimension(1); - -// const Index C_rows = (contraction_axes[0].first == 0) ? A_columns : A_rows; -// const Index C_columns = (contraction_axes[0].second == 1) ? B_rows : B_columns; - -// const Index channels = A.dimension(2); -// const Index blocks_number = A.dimension(3); - -// type* A_data = (type*) A.data(); -// type* B_data = (type*) B.data(); -// type* C_data = C.data(); - -// type* a_block_data = nullptr; -// type* b_matrix_data = nullptr; -// type* c_block_data = nullptr; - -// for(Index i = 0; i < blocks_number; i++) -// { -// a_block_data = A_data + A_rows * A_columns * channels * i; -// b_matrix_data = B_data + B_rows * B_columns * i; -// c_block_data = C_data + C_rows * C_columns * channels * i; - -// const TensorMap> A_block(a_block_data, A_rows, A_columns, channels); -// const TensorMap> B_matrix(b_matrix_data, B_rows, B_columns); -// TensorMap> C_block(c_block_data, C_rows, C_columns, channels); - -// C_block.device(*thread_pool_device) = A_block.contract(B_matrix, contraction_axes); -// } -//} - - -//void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device, -// const Tensor& A, -// const Tensor& B, -// Tensor& C, -// const Eigen::array, 1> contraction_axes) -//{ - // Assumes A, B & C share their last 2 dimensions, and the first dimension of B is equal to one of the 2 remaining of A (the contraction axes) - // The other dimension of C will be the non-equal dimension of A - // By default contraction axes are (1, 0) - -// const Index A_rows = A.dimension(0); -// const Index A_columns = A.dimension(1); -// const Index B_rows = B.dimension(0); - -// const Index C_rows = (contraction_axes[0].first == 0) ? 
diff --git a/opennn/tensors.cpp b/opennn/tensors.cpp
index e28023663..71b5d2f11 100644
--- a/opennn/tensors.cpp
+++ b/opennn/tensors.cpp
@@ -1,7 +1,7 @@
 // OpenNN: Open Neural Networks Library
 // www.opennn.net
 //
-// T E N S O R   U T I L I T I E S   S O U R C E
+// T E N S O R S   S O U R C E
 //
 // Artificial Intelligence Techniques, SL
 // artelnics@artelnics.com
@@ -46,18 +46,6 @@ bool get_random_bool()
 }


-//void multiply_rows(Tensor<type, 2>& matrix, const Tensor<type, 1>& vector)
-//{
-//    const Index rows_number = matrix.dimension(0);
-//    const Index columns_number = matrix.dimension(1);

-//    #pragma omp parallel for
-//    for(Index i = 0; i < rows_number; i++)
-//        for(Index j = 0; j < columns_number; j++)
-//            matrix(i, j) *= vector(j);
-//}


 void multiply_matrices(const ThreadPoolDevice* thread_pool_device,
                        Tensor& tensor,
                        const Tensor& vector)

@@ -228,141 +216,6 @@ void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device,
 }


-//void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device,
-//                                 const Tensor<type, 4>& A,
-//                                 const Tensor<type, 3>& B,
-//                                 Tensor<type, 4>& C,
-//                                 const Eigen::array<IndexPair<Index>, 1> contraction_axes)
-//{
-//    Assumes A, B & C share their last dimension. A & C also share their second-to-last dimension.
-//    A & B share one of their remaining 2 dimensions (the contraction axes).
-//    The other 2 dimensions of C will be the non-equal dimensions of A & B, in that order.
-//    By default contraction axes are (1, 0).

-//    const Index A_rows = A.dimension(0);
-//    const Index A_columns = A.dimension(1);
-//    const Index B_rows = B.dimension(0);
-//    const Index B_columns = B.dimension(1);

-//    const Index C_rows = (contraction_axes[0].first == 0) ? A_columns : A_rows;
-//    const Index C_columns = (contraction_axes[0].second == 1) ? B_rows : B_columns;

-//    const Index channels = A.dimension(2);
-//    const Index blocks_number = A.dimension(3);

-//    type* A_data = (type*) A.data();
-//    type* B_data = (type*) B.data();
-//    type* C_data = C.data();

-//    type* a_block_data = nullptr;
-//    type* b_matrix_data = nullptr;
-//    type* c_block_data = nullptr;

-//    for(Index i = 0; i < blocks_number; i++)
-//    {
-//        a_block_data = A_data + A_rows * A_columns * channels * i;
-//        b_matrix_data = B_data + B_rows * B_columns * i;
-//        c_block_data = C_data + C_rows * C_columns * channels * i;

-//        const TensorMap<Tensor<type, 3>> A_block(a_block_data, A_rows, A_columns, channels);
-//        const TensorMap<Tensor<type, 2>> B_matrix(b_matrix_data, B_rows, B_columns);
-//        TensorMap<Tensor<type, 3>> C_block(c_block_data, C_rows, C_columns, channels);

-//        C_block.device(*thread_pool_device) = A_block.contract(B_matrix, contraction_axes);
-//    }
-//}


-//void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device,
-//                                 const Tensor<type, 4>& A,
-//                                 const Tensor<type, 3>& B,
-//                                 Tensor<type, 3>& C,
-//                                 const Eigen::array<IndexPair<Index>, 1> contraction_axes)
-//{
-    // Assumes A, B & C share their last 2 dimensions, and the first dimension of B is equal to one of the 2 remaining of A (the contraction axes)
-    // The other dimension of C will be the non-equal dimension of A
-    // By default contraction axes are (1, 0)

-//    const Index A_rows = A.dimension(0);
-//    const Index A_columns = A.dimension(1);
-//    const Index B_rows = B.dimension(0);

-//    const Index C_rows = (contraction_axes[0].first == 0) ? A_columns : A_rows;

-//    const Index channels = A.dimension(2);
-//    const Index blocks_number = A.dimension(3);

-//    type* A_data = (type*) A.data();
-//    type* B_data = (type*) B.data();
-//    type* C_data = C.data();

-//    type* a_matrix_data = nullptr;
-//    type* b_vector_data = nullptr;
-//    type* c_vector_data = nullptr;

-//    for(Index i = 0; i < blocks_number; i++)
-//    {
-//        for(Index j = 0; j < channels; j++)
-//        {
-//            a_matrix_data = A_data + A_rows * A_columns * (i * channels + j);
-//            b_vector_data = B_data + B_rows * (i * channels + j);
-//            c_vector_data = C_data + C_rows * (i * channels + j);

-//            const TensorMap<Tensor<type, 2>> A_matrix(a_matrix_data, A_rows, A_columns);
-//            const TensorMap<Tensor<type, 1>> B_vector(b_vector_data, B_rows);
-//            TensorMap<Tensor<type, 1>> C_vector(c_vector_data, C_rows);

-//            C_vector.device(*thread_pool_device) = A_matrix.contract(B_vector, contraction_axes);
-//        }
-//    }
-//}


- // void batch_matrix_multiplication(const ThreadPoolDevice* thread_pool_device,
- //                                  const Tensor<type, 4>& A,
- //                                  const Tensor<type, 3>& B,
- //                                  TensorMap<Tensor<type, 3>>& C,
- //                                  const Eigen::array<IndexPair<Index>, 1> contraction_axes)
- // {
- //     // Assumes A, B & C share their last 2 dimensions, and the first dimension of B is equal to one of the 2 remaining of A (the contraction axes).
- //     // The other dimension of C will be the non-equal dimension of A.
- //     // By default contraction axes are (1, 0).

- //     const Index A_rows = A.dimension(0);
- //     const Index A_columns = A.dimension(1);
- //     const Index B_rows = B.dimension(0);

- //     const Index C_rows = (contraction_axes[0].first == 0) ? A_columns : A_rows;

- //     const Index channels = A.dimension(2);
- //     const Index blocks_number = A.dimension(3);

- //     type* A_data = (type*)A.data();
- //     type* B_data = (type*)B.data();
- //     type* C_data = C.data();

- //     type* a_matrix_data = nullptr;
- //     type* b_vector_data = nullptr;
- //     type* c_vector_data = nullptr;

- //     for(Index i = 0; i < blocks_number; i++)
- //     {
- //         for(Index j = 0; j < channels; j++)
- //         {
- //             a_matrix_data = A_data + A_rows * A_columns * (i * channels + j);
- //             b_vector_data = B_data + B_rows * (i * channels + j);
- //             c_vector_data = C_data + C_rows * (i * channels + j);

- //             const TensorMap<Tensor<type, 2>> A_matrix(a_matrix_data, A_rows, A_columns);
- //             const TensorMap<Tensor<type, 1>> B_vector(b_vector_data, B_rows);
- //             TensorMap<Tensor<type, 1>> C_vector(c_vector_data, C_rows);

- //             C_vector.device(*thread_pool_device) = A_matrix.contract(B_vector, contraction_axes);
- //         }
- //     }
- // }
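The three commented-out batch_matrix_multiplication variants deleted above all reduced to per-slice Eigen contractions. For reference, a minimal self-contained sketch of the underlying call they wrapped (plain Eigen, not OpenNN; shapes chosen for illustration):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
    using Eigen::Tensor;
    using Eigen::IndexPair;

    Tensor<float, 2> A(2, 3), B(3, 4);
    A.setConstant(1.0f);
    B.setConstant(2.0f);

    // Contract A's dimension 1 with B's dimension 0: an ordinary matrix
    // product. This is the (1, 0) default the removed comments referred to.
    const Eigen::array<IndexPair<int>, 1> axes = { IndexPair<int>(1, 0) };

    const Tensor<float, 2> C = A.contract(B, axes);   // 2x4, every entry 6

    std::cout << C << std::endl;
}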
 Tensor<type, 2> self_kronecker_product(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector)
 {
     const Index columns_number = vector.size();


-// void divide_columns(const ThreadPoolDevice* thread_pool_device, Tensor<type, 2>& matrix, const Tensor<type, 1>& vector)
-// {
-//     const Index columns_number = matrix.dimension(1);

-//     for(Index i = 0; i < columns_number; i++)
-//     {
-//         TensorMap<Tensor<type, 1>> column = tensor_map(matrix, i);

-//         column.device(*thread_pool_device) = column / vector;
-//     }
-// }


 void divide_columns(const ThreadPoolDevice* thread_pool_device, TensorMap<Tensor<type, 2>>& matrix, const Tensor<type, 1>& vector)
 {
     const Index columns_number = matrix.dimension(1);

@@ -452,53 +292,6 @@ void sum_matrices(const ThreadPoolDevice* thread_pool_device, const Tensor
 }


-//void sum_matrices(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector, Tensor<type, 3>& tensor)
-//{
-//    const Index rows_number = tensor.dimension(0);
-//    const Index columns_number = tensor.dimension(1);
-//    const Index channels = tensor.dimension(2);

-//    const Index slice_size = rows_number * columns_number;

-//    for(Index i = 0; i < channels; i++)
-//    {
-//        TensorMap<Tensor<type, 2>> matrix(tensor.data() + i*slice_size, rows_number, columns_number);

-//        matrix.device(*thread_pool_device) = matrix + vector(i);
-//    }
-//}


-//void sum_matrices(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 2>& matrix, Tensor<type, 3>& tensor)
-//{
-//    const Index rows_number = tensor.dimension(0);
-//    const Index columns_number = tensor.dimension(1);
-//    const Index channels = tensor.dimension(2);

-//    const Index slice_size = rows_number * columns_number;

-//    for(Index i = 0; i < channels; i++)
-//    {
-//        TensorMap<Tensor<type, 2>> submatrix(tensor.data() + i*slice_size, rows_number, columns_number);

-//        submatrix.device(*thread_pool_device) += matrix;
-//    }
-//}


-//void substract_columns(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector, Tensor<type, 2>& matrix)
-//{
-//    const Index columns_number = matrix.dimension(1);

-//    for(Index i = 0; i < columns_number; i++)
-//    {
-//        TensorMap<Tensor<type, 1>> column = tensor_map(matrix, i);

-//        column.device(*thread_pool_device) = column - vector;
-//    }
-//}


 void substract_matrices(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 2>& matrix, Tensor<type, 3>& tensor)
 {
     const Index rows_number = tensor.dimension(0);

@@ -650,14 +443,6 @@ Index count_between(const Tensor<type, 1>& vector, const type& minimum, const typ
 }


-//void get_row(Tensor<type, 1>& row, const Tensor<type, 2>& matrix, const Index& row_index)
-//{
-//    const Index columns_number = row.dimension(0);

-//    memcpy(row.data(), matrix.data() + row_index * columns_number, columns_number*sizeof(type));
-//}


 void set_row(Tensor<type, 2>& matrix, const Tensor<type, 1>& new_row, const Index& row_index)
 {
     const Index columns_number = new_row.size();

@@ -854,28 +639,6 @@ void sum_diagonal(Tensor<type, 2>& matrix, const type& value)
 }


-//void sum_diagonal(Tensor<type, 2>& matrix, const Tensor<type, 1>& values)
-//{
-//    const Index rows_number = matrix.dimension(0);

-//    #pragma omp parallel for

-//    for(Index i = 0; i < rows_number; i++)
-//        matrix(i, i) += values(i);
-//}


-//void substract_diagonal(Tensor<type, 2>& matrix, const Tensor<type, 1>& values)
-//{
-//    const Index rows_number = matrix.dimension(0);

-//    #pragma omp parallel for

-//    for(Index i = 0; i < rows_number; i++)
-//        matrix(i, i) -= values(i);
-//}


 Tensor<type, 1> perform_Householder_QR_decomposition(const Tensor<type, 2>& A, const Tensor<type, 1>& b)
 {
     const Index n = A.dimension(0);

@@ -908,7 +671,7 @@ void fill_tensor_data(const Tensor<type, 2>& matrix,
     const type* matrix_data = matrix.data();

-    #pragma omp parallel for
+    //#pragma omp parallel for

     for (Index j = 0; j < columns_number; j++)
     {
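fill_tensor_data, whose hunk above now disables the OpenMP pragma, is a column-major gather: for each requested column it copies the selected rows into a contiguous destination. A minimal sketch of that access pattern (simplified signature, not the OpenNN implementation):

#include <vector>

using Index = long long;
using type = float;

// Gather selected rows/columns of a column-major samples-by-variables matrix
// into a contiguous buffer. Each j writes a disjoint destination column,
// which is why the loop is parallelisable in principle.
void gather(const type* data, Index rows_number,
            const std::vector<Index>& row_indices,
            const std::vector<Index>& column_indices,
            type* destination)
{
    for (std::size_t j = 0; j < column_indices.size(); j++)
    {
        const type* source_column = data + column_indices[j] * rows_number;
        type* destination_column = destination + j * row_indices.size();

        for (std::size_t i = 0; i < row_indices.size(); i++)
            destination_column[i] = source_column[row_indices[i]];
    }
}

Since the per-j writes are disjoint, the commented-out pragma looks safe for this pattern; disabling it reads more like a debugging aid than a correctness fix, though the patch does not say.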
diff --git a/tests/correlations_test.cpp b/tests/correlations_test.cpp
index 438b3575f..276544de7 100644
--- a/tests/correlations_test.cpp
+++ b/tests/correlations_test.cpp
@@ -27,6 +27,7 @@ class CorrelationsTest : public ::testing::Test

 TEST_F(CorrelationsTest, SpearmanCorrelation)
 {
+
     Tensor<type, 1> x(10);
     x.setValues({ type(1), type(2), type(3), type(4), type(5), type(6), type(7), type(8), type(9), type(10) });

diff --git a/tests/neural_network_test.cpp b/tests/neural_network_test.cpp
index eea9ea660..e5f86ce14 100644
--- a/tests/neural_network_test.cpp
+++ b/tests/neural_network_test.cpp
@@ -57,25 +57,27 @@ TEST(NeuralNetworkTest, AutoAssociationConstructor)

 TEST(NeuralNetworkTest, ImageClassificationConstructor)
 {
-/*
-    Inputs variables dimension = (channels, width, height)
-    Tensor<Index, 1> inputs_variables_dimension(3);
-    inputs_variables_dimension.setValues({ 1,28,28 });

-    Index blocks_number = 0;
-    Index outputs_number = 10;
-    Tensor<Index, 1> filters_dimensions(3);
-    filters_dimensions.setValues({ 1,2,2 });

-    NeuralNetwork neural_network_4(inputs_variables_dimension, blocks_number, filters_dimensions, outputs_number);

-    EXPECT_EQ(neural_network_4.get_layers_number(), 6); // Scaling, 1 block (Conv, Pool), Flatten, 1 Perceptron, Probabilistic.
-    EXPECT_EQ(neural_network_4.get_layer(0)->get_type(), Layer::Type::Scaling2D);
-    EXPECT_EQ(neural_network_4.get_layer(1)->get_type(), Layer::Type::Convolutional);
-    EXPECT_EQ(neural_network_4.get_layer(2)->get_type(), Layer::Type::Pooling);
-    EXPECT_EQ(neural_network_4.get_layer(3)->get_type(), Layer::Type::Flatten);
-    EXPECT_EQ(neural_network_4.get_layer(4)->get_type(), Layer::Type::Perceptron);
-    EXPECT_EQ(neural_network_4.get_layer(5)->get_type(), Layer::Type::Probabilistic);
-*/
+    // Input dimensions {height, width, channels}
+
+    const Index height = 3;
+    const Index width = 3;
+    const Index channels = 1;
+
+    const Index blocks = 1;
+
+    const Index outputs_number = 1;
+
+    //NeuralNetwork neural_network(NeuralNetwork::ModelType::ImageClassification,
+    //                             {height, width, channels}, {blocks}, { outputs_number });
+
+    //EXPECT_EQ(neural_network.get_layers_number(), 5);
+    //EXPECT_EQ(neural_network.get_layer(0)->get_type(), Layer::Type::Scaling4D);
+    //EXPECT_EQ(neural_network.get_layer(1)->get_type(), Layer::Type::Convolutional);
+    //EXPECT_EQ(neural_network.get_layer(2)->get_type(), Layer::Type::Pooling);
+    //EXPECT_EQ(neural_network.get_layer(3)->get_type(), Layer::Type::Flatten);
+    //EXPECT_EQ(neural_network.get_layer(4)->get_type(), Layer::Type::Perceptron);
+    //EXPECT_EQ(neural_network.get_layer(5)->get_type(), Layer::Type::Probabilistic);
+
 }

@@ -103,27 +105,27 @@ TEST(NeuralNetworkTest, CalculateOutputsEmpty)

 TEST(NeuralNetworkTest, CalculateOutputsZero)
 {
+    const Index samples_number = get_random_index(1, 5);
+    const Index inputs_number = get_random_index(1, 5);
+    const Index neurons_number = get_random_index(1, 5);
+    const Index outputs_number = get_random_index(1, 5);

-    const Index batch_samples_number = 3;
-    const Index inputs_number = 2;
-    const Index neurons_number = 4;
-    const Index outputs_number = 5;
-
-    NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation, {inputs_number}, {neurons_number}, {outputs_number});
+    NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation,
+                                 {inputs_number}, {neurons_number}, {outputs_number});

     neural_network.set_parameters_constant(type(0));

-    Tensor<type, 2> inputs(batch_samples_number, inputs_number);
+    Tensor<type, 2> inputs(samples_number, inputs_number);
     inputs.setConstant(type(0));

-//    const Tensor<type, 2> outputs = neural_network.calculate_outputs(inputs);
-
+    const Tensor<type, 2> outputs = neural_network.calculate_outputs(inputs);
+    /*
 // EXPECT_EQ(outputs.size(), batch_samples_number * outputs_number);
 // EXPECT_NEAR(outputs(0,0), 0, NUMERIC_LIMITS_MIN);
 // EXPECT_NEAR(outputs(0,1), 0, NUMERIC_LIMITS_MIN);
 // EXPECT_NEAR(outputs(0,2), 0, NUMERIC_LIMITS_MIN);
 // EXPECT_NEAR(outputs(0,3), 0, NUMERIC_LIMITS_MIN);
 // EXPECT_NEAR(outputs(0,4), 0, NUMERIC_LIMITS_MIN);
-/*
     // Test

     neural_network.set(NeuralNetwork::ModelType::Approximation, {1, 2});
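CalculateOutputsZero now draws its sizes at random, which works because the property it checks is size-independent: with every weight and bias at zero, the network maps any zero input to a zero output. A standalone check of the underlying arithmetic for a single neuron (plain C++, not the NeuralNetwork API):

#include <cassert>
#include <cmath>

int main()
{
    // One perceptron with zero weights and bias: the combination is 0 for any
    // input, tanh(0) is 0, and a zero-weight linear output layer keeps it 0.
    const double x1 = 123.0, x2 = -7.5;            // arbitrary inputs
    const double w1 = 0.0, w2 = 0.0, b = 0.0;      // zeroed parameters

    const double combination = w1 * x1 + w2 * x2 + b;
    const double hidden = std::tanh(combination);  // 0
    const double output = 0.0 * hidden + 0.0;      // zero output weights/bias

    assert(hidden == 0.0 && output == 0.0);
}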
diff --git a/tests/perceptron_layer_test.cpp b/tests/perceptron_layer_test.cpp
index e7540a938..34a29780c 100644
--- a/tests/perceptron_layer_test.cpp
+++ b/tests/perceptron_layer_test.cpp
@@ -165,7 +165,6 @@ TEST(PerceptronLayerTest, ForwardPropagateZero)

 TEST(PerceptronLayerTest, ForwardPropagate)
 {
-    /*
     Tensor<type, 1> parameters;
     Tensor<type, 2> inputs;
     Tensor<type, 2> outputs;

     Tensor<type, 1> potential_parameters;

-    pair<type*, dimensions> input_pairs;
-
-    // Test
+    ;

-    samples_number = 2;
-    inputs_number = 2;
-    neurons_number = 2;
+    const Index samples_number = 2;
+    const Index inputs_number = 2;
+    const Index neurons_number = 2;

     bool is_training = true;

-    perceptron_layer.set(inputs_number, neurons_number, PerceptronLayer::ActivationFunction::Linear);
+    PerceptronLayer perceptron_layer({ inputs_number },
+                                     { neurons_number },
+                                     PerceptronLayer::ActivationFunction::Linear);
+
     perceptron_layer.set_parameters_constant(type(1));

     inputs.resize(samples_number, inputs_number);
     inputs.setConstant(type(1));

-    perceptron_layer_forward_propagation->set(samples_number, perceptron_layer.get());
+    PerceptronLayerForwardPropagation perceptron_layer_forward_propagation(samples_number, &perceptron_layer);

-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
+    const pair<type*, dimensions> input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
+/*
     perceptron_layer->forward_propagate({input_pairs},
                                         perceptron_layer_forward_propagation,
                                         is_training);

     outputs = perceptron_layer_forward_propagation.outputs;

     EXPECT_EQ(abs(perceptron_layer_forward_propagation.activation_derivatives(0,0) - type(1)) < type(1e-3));
     EXPECT_EQ(abs(perceptron_layer_forward_propagation.activation_derivatives(0,1) - type(1)) < type(1e-3));

-    // Test
-
-    samples_number = 2;
-    inputs_number = 2;
-    neurons_number = 2;
-
-    perceptron_layer.set(inputs_number, neurons_number, PerceptronLayer::ActivationFunction::HyperbolicTangent);
-    perceptron_layer.set_parameters_constant(type(1));
-
-    inputs.resize(samples_number, inputs_number);
-    inputs.setConstant(type(1));
-
-    potential_parameters = perceptron_layer.get_parameters();
-
-    perceptron_layer_forward_propagation.set(samples_number, &perceptron_layer);
-
-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
-
-    perceptron_layer.forward_propagate({input_pairs}, &perceptron_layer_forward_propagation, is_training);
-
-    outputs = perceptron_layer_forward_propagation.outputs;
-
-    EXPECT_EQ(abs(outputs(0,0) - type(0.99505)) < type(1e-3));
-    EXPECT_EQ(abs(outputs(0,1) - type(0.99505)) < type(1e-3));
-    EXPECT_EQ(abs(perceptron_layer_forward_propagation.activation_derivatives(0,0) - type(0.00986)) < type(1e-3));
-    EXPECT_EQ(abs(perceptron_layer_forward_propagation.activation_derivatives(0,1) - type(0.00986)) < type(1e-3));
-
-    // Test
-
-    samples_number = 1;
-    inputs_number = 3;
-    neurons_number = 4;
-
-    perceptron_layer.set(inputs_number, neurons_number);
-
-    synaptic_weights.resize(inputs_number, neurons_number);
-    biases.resize( neurons_number);
-    inputs.resize(samples_number, inputs_number);
-    outputs.resize(1, neurons_number);
-
-    inputs.setConstant(type(1));
-
-    perceptron_layer.set_activation_function(PerceptronLayer::ActivationFunction::Linear);
-
-    perceptron_layer_forward_propagation.set(samples_number, &perceptron_layer);
-
-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
-
-    perceptron_layer.forward_propagate({input_pairs}, &perceptron_layer_forward_propagation, is_training);
-
-    outputs = perceptron_layer_forward_propagation.outputs;
-
-    EXPECT_EQ(outputs.dimension(0) == 1);
-    EXPECT_EQ(outputs.dimension(1) == 4);
-    EXPECT_EQ(Index(outputs(0,0)) == 7);
-    EXPECT_EQ(Index(outputs(1,0)) == -5);
-    EXPECT_EQ(Index(outputs(2,0)) == 1);
-
-    // Test
-
-    inputs_number = 3;
-    neurons_number = 2;
-
-    perceptron_layer.set(inputs_number, neurons_number);
-    perceptron_layer.set_parameters_constant(type(0));
-
-    inputs.resize(samples_number, inputs_number);
-    inputs.setConstant(type(0));
-
-    outputs.resize(1, neurons_number);
-
-    perceptron_layer_forward_propagation.set(samples_number, &perceptron_layer);
-
-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
-
-    perceptron_layer.forward_propagate({input_pairs}, &perceptron_layer_forward_propagation, is_training);
-
-    outputs = perceptron_layer_forward_propagation.outputs;
-
-    EXPECT_EQ(outputs.dimension(0) == 1);
-    EXPECT_EQ(outputs.dimension(1) == 2);
-    EXPECT_NEAR(abs(outputs(0,0)) < NUMERIC_LIMITS_MIN);
-
-    // Test
-
-    inputs_number = 4;
-    neurons_number = 2;
-
-    perceptron_layer.set(4, 2);
-    parameters.resize(10);
-
-    parameters.setValues({type(-1),type(2),type(-3),type(4),type(-5),type(6),type(-7),type(8),type(-9),type(10) });
-
-    perceptron_layer.set_parameters(parameters);
-
-    inputs.resize(samples_number,inputs_number);
-    inputs.setValues({{type(4),type(-3),type(2),type(-1)}});
-
-    outputs.resize(1, neurons_number);
-
-    perceptron_layer_forward_propagation.set(samples_number, &perceptron_layer);
-
-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
-
-    perceptron_layer.forward_propagate({input_pairs}, &perceptron_layer_forward_propagation, is_training);
-
-    outputs = perceptron_layer_forward_propagation.outputs;
-
-    EXPECT_EQ(outputs.dimension(0) == 1);
-    EXPECT_EQ(outputs.dimension(1) == 2);
-    EXPECT_NEAR(abs(outputs(0,0) + type(1)) < NUMERIC_LIMITS_MIN);
-
-    // Test 5
-
-    inputs_number = 1;
-    neurons_number = 2;
-
-    inputs.resize(samples_number, inputs_number);
-    inputs.setConstant(type(3.0));
-
-    perceptron_layer.set(inputs_number, neurons_number);
-    perceptron_layer.set_parameters_constant(type(-2.0));
-
-    outputs.resize(1, neurons_number);
-
-    perceptron_layer_forward_propagation.set(samples_number, &perceptron_layer);
-
-    input_pairs = {inputs.data(), {{samples_number, inputs_number}}};
-
-    perceptron_layer.forward_propagate({input_pairs}, &perceptron_layer_forward_propagation, is_training);
-
-    outputs = perceptron_layer_forward_propagation.outputs;
-    parameters.resize(2);
-    parameters.setConstant(type(1));
-
-    // Test
-
-    perceptron_layer.set(1, 1);
-
-    inputs.resize(1,1);
-    inputs.setRandom();
-
-    parameters = perceptron_layer.get_parameters();
 */
 }
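The deleted expectations above encode hand-computed values that are easy to re-derive: with every parameter set to 1 and two unit inputs, the pre-activation is 1*1 + 1*1 + 1 = 3, and tanh(3) is roughly 0.99505 with derivative 1 - tanh^2(3) of roughly 0.00986. A quick standalone check of that arithmetic:

#include <cassert>
#include <cmath>

int main()
{
    // Two unit inputs, unit weights, unit bias: combination = 3.
    const double combination = 1.0 * 1.0 + 1.0 * 1.0 + 1.0;
    assert(combination == 3.0);

    // The values the removed HyperbolicTangent expectations hard-coded.
    const double activation = std::tanh(combination);
    const double derivative = 1.0 - activation * activation;

    assert(std::abs(activation - 0.99505) < 1e-3);
    assert(std::abs(derivative - 0.00986) < 1e-3);
}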