Commit: clean

RoberLopez committed Dec 28, 2024
1 parent a7947ed commit 9fa003b
Showing 11 changed files with 119 additions and 511 deletions.
8 changes: 6 additions & 2 deletions examples/airfoil_self_noise/main.cpp
@@ -25,11 +25,16 @@ int main()
// Data set

DataSet data_set("C:/airfoil_self_noise.csv", ";", true);

const Index input_variables_number = data_set.get_variables_number(DataSet::VariableUse::Input);
const Index target_variables_number = data_set.get_variables_number(DataSet::VariableUse::Target);

data_set.set(DataSet::SampleUse::Training);

//data_set.print_input_target_raw_variables_correlations();

//data_set.save("../opennn/examples/airfoil_self_noise/data/neural_network.xml");
//data_set.load("../opennn/examples/airfoil_self_noise/data/neural_network.xml");

// Neural network

@@ -45,7 +50,6 @@ int main()

TrainingStrategy training_strategy(&neural_network, &data_set);


// training_strategy.set_display(false);

//training_strategy.print();
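Note on the example: the two variable counts queried above are typically what sizes the network built in the elided "// Neural network" section. A minimal sketch of that section, assuming the usual OpenNN approximation constructor; the ModelType name, the constructor signature, and the 10-neuron hidden layer are assumptions, not part of this commit:

    // Sketch only; the exact constructor may differ in this development snapshot
    NeuralNetwork neural_network(NeuralNetwork::ModelType::Approximation,
                                 {input_variables_number, 10, target_variables_number});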
12 changes: 6 additions & 6 deletions opennn/adaptive_moment_estimation.cpp
@@ -253,18 +253,19 @@ TrainingResults AdaptiveMomentEstimation::perform_training()
//cout << "Iteration " << iteration << "/" << training_batches_number << endl;

// Data set

training_batch.fill(training_batches[iteration],
input_variable_indices,
target_variable_indices,
decoder_variable_indices);
/*
decoder_variable_indices,
target_variable_indices);



// Neural network

neural_network->forward_propagate(training_batch.get_input_pairs(),
training_forward_propagation,
is_training);
// Loss index

loss_index->back_propagate(training_batch,
@@ -289,7 +290,6 @@ TrainingResults AdaptiveMomentEstimation::perform_training()

//if(display && epoch % display_period == 0)
// display_progress_bar(iteration, training_batches_number - 1);
*/
}

// Loss
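Note: the hunk ends before the parameter update itself. For reference, the update this optimizer applies is the standard Adam rule; a self-contained sketch, independent of OpenNN's internals (all names here are illustrative):

    #include <cmath>
    #include <vector>

    // One Adam step: g is the gradient, m and v the running first- and
    // second-moment estimates, t the 1-based iteration count.
    void adam_step(std::vector<double>& w, const std::vector<double>& g,
                   std::vector<double>& m, std::vector<double>& v, const int t,
                   const double lr = 0.001, const double b1 = 0.9,
                   const double b2 = 0.999, const double eps = 1e-7)
    {
        for(std::size_t i = 0; i < w.size(); i++)
        {
            m[i] = b1*m[i] + (1 - b1)*g[i];        // update first moment
            v[i] = b2*v[i] + (1 - b2)*g[i]*g[i];   // update second moment

            const double m_hat = m[i]/(1 - std::pow(b1, t));   // bias correction
            const double v_hat = v[i]/(1 - std::pow(b2, t));

            w[i] -= lr*m_hat/(std::sqrt(v_hat) + eps);
        }
    }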
98 changes: 38 additions & 60 deletions opennn/batch.cpp
@@ -10,7 +10,6 @@
#include "tensors.h"
#include "image_data_set.h"
#include "images.h"
//#include "language_data_set.h"

namespace opennn
{
@@ -36,12 +35,12 @@ void Batch::fill(const vector<Index>& sample_indices,
}
else
{
//fill_tensor_data(data, sample_indices, input_indices, input_tensor.data());
fill_tensor_data(data, sample_indices, input_indices, input_tensor.data());
}

//fill_tensor_data(data, sample_indices, decoder_indices, decoder_tensor.data());
fill_tensor_data(data, sample_indices, decoder_indices, decoder_tensor.data());

//fill_tensor_data(data, sample_indices, target_indices, target_tensor.data());
fill_tensor_data(data, sample_indices, target_indices, target_tensor.data());
}
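
Note: the three fill_tensor_data calls restored above are what actually gather a batch. A hypothetical stand-in showing the intended semantics; the real helper lives in tensors.h, and this sketch assumes a column-major (Eigen-default) destination layout:

    // Illustrative only: copy the selected samples (rows) and variables
    // (columns) of the data matrix into a flat column-major batch buffer.
    void fill_tensor_data_sketch(const Tensor<type, 2>& data,
                                 const std::vector<Index>& sample_indices,
                                 const std::vector<Index>& variable_indices,
                                 type* destination)
    {
        const Index samples = sample_indices.size();
        const Index variables = variable_indices.size();

        for(Index j = 0; j < variables; j++)
            for(Index i = 0; i < samples; i++)
                destination[j*samples + i] = data(sample_indices[i], variable_indices[j]);
    }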


@@ -122,67 +121,42 @@ Batch::Batch(const Index& new_samples_number, DataSet* new_data_set)
}


void Batch::set(const Index& new_batch_size, DataSet* new_data_set)
void Batch::set(const Index& new_samples_number, DataSet* new_data_set)
{
if (!new_data_set) return;

samples_number = new_batch_size;
samples_number = new_samples_number;
data_set = new_data_set;

const dimensions& data_set_input_dimensions = data_set->get_input_dimensions();
const dimensions& data_set_decoder_dimensions = data_set->get_decoder_dimensions();
const dimensions& data_set_target_dimensions = data_set->get_target_dimensions();

// Inputs

if(data_set_input_dimensions.size() == 2)
{
const Index rows_number = data_set_input_dimensions[0];
const Index columns_number = data_set_input_dimensions[1];

input_dimensions = {{samples_number, rows_number, columns_number}};
input_tensor.resize(samples_number*rows_number* columns_number);
}
else if(data_set_input_dimensions.size() == 3)
if (!data_set_input_dimensions.empty())
{
const Index rows_number = data_set_input_dimensions[0];
const Index columns_number = data_set_input_dimensions[1];
const Index channels = data_set_input_dimensions[2];
input_dimensions = { samples_number };
input_dimensions.insert(input_dimensions.end(), data_set_input_dimensions.begin(), data_set_input_dimensions.end());

input_dimensions = {{samples_number, rows_number, columns_number, channels}};
input_tensor.resize(samples_number*channels*rows_number*columns_number);
const Index input_size = accumulate(input_dimensions.begin(), input_dimensions.end(), 1, multiplies<Index>());
input_tensor.resize(input_size);
}

// Decoder

if(data_set_decoder_dimensions.size() == 2)
if (!data_set_decoder_dimensions.empty())
{
const Index rows_number = data_set_input_dimensions[0];
const Index columns_number = data_set_input_dimensions[1];
decoder_dimensions = { samples_number };
decoder_dimensions.insert(decoder_dimensions.end(), data_set_decoder_dimensions.begin(), data_set_decoder_dimensions.end());

decoder_dimensions = {{samples_number, rows_number, columns_number}};
decoder_tensor.resize(samples_number*rows_number* columns_number);
const Index decoder_size = accumulate(decoder_dimensions.begin(), decoder_dimensions.end(), 1, multiplies<Index>());
decoder_tensor.resize(decoder_size);
}

// Target

if(data_set_target_dimensions.size() == 2)
if (!data_set_target_dimensions.empty())
{
const Index rows_number = data_set_target_dimensions[0];
const Index columns_number = data_set_target_dimensions[1];
target_dimensions = { samples_number };
target_dimensions.insert(target_dimensions.end(), data_set_target_dimensions.begin(), data_set_target_dimensions.end());

target_dimensions = {{samples_number, rows_number, columns_number}};
target_tensor.resize(samples_number*rows_number*columns_number);
}
else if(data_set_target_dimensions.size() == 3)
{
const Index rows_number = data_set_target_dimensions[0];
const Index columns_number = data_set_target_dimensions[1];
const Index channels = data_set_target_dimensions[2];

target_dimensions = {{samples_number, rows_number, columns_number, channels}};

target_tensor.resize(samples_number*channels*rows_number*columns_number);
const Index target_size = accumulate(target_dimensions.begin(), target_dimensions.end(), 1, multiplies<Index>());
target_tensor.resize(target_size);
}
}
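
Note: the rewrite above collapses the old rank-specific branches into one rule: prepend the batch's sample count to the per-sample dimensions, then multiply everything out to size the flat tensor. The rule in isolation:

    #include <functional>
    #include <numeric>
    #include <vector>

    using Index = long long;                // stand-ins for opennn's Index
    using dimensions = std::vector<Index>;  // and dimensions aliases

    // {rows, columns, channels} per sample -> {samples, rows, columns, channels}
    dimensions batch_dimensions(const Index samples_number, const dimensions& sample_dims)
    {
        dimensions result = {samples_number};
        result.insert(result.end(), sample_dims.begin(), sample_dims.end());
        return result;
    }

    // Flat element count, as used above to resize the 1-D tensors
    Index flat_size(const dimensions& dims)
    {
        return std::accumulate(dims.begin(), dims.end(), Index{1},
                               std::multiplies<Index>());
    }

    // e.g. batch_dimensions(32, {28, 28, 1}) == {32, 28, 28, 1}; flat_size == 25088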

@@ -195,17 +169,15 @@ Index Batch::get_samples_number() const

void Batch::print() const
{
const Index inputs_rank = input_dimensions.size();
const Index targets_rank = target_dimensions.size();

cout << "Batch" << endl
<< "Inputs:" << endl
<< "Inputs dimensions:" << endl;
<< "Input dimensions:" << endl;

print_vector(input_dimensions);

for(Index i = 0; i < inputs_rank; i++)
cout << input_dimensions[i] << endl;

if(inputs_rank == 4)
/*
if(input_dimensions.size() == 4)
{
const TensorMap<Tensor<type, 4>> inputs((type*)input_tensor.data(),
input_dimensions[0],
Expand All @@ -215,18 +187,24 @@ void Batch::print() const
cout << inputs << endl;
}
*/

cout << "Decoder:" << endl
<< "Decoder dimensions:" << endl;

print_vector(decoder_dimensions);

cout << "Targets:" << endl
<< "Targets dimensions:" << endl;
<< "Target dimensions:" << endl;

print_vector(target_dimensions);

for(Index i = 0; i < targets_rank; i++)
cout << target_dimensions[i] << endl;
// const TensorMap<Tensor<type, 2>> targets((type*)target_tensor.data(),
// target_dimensions[0],
// target_dimensions[1]);

const TensorMap<Tensor<type, 2>> targets((type*)target_tensor.data(),
target_dimensions[0],
target_dimensions[1]);
// cout << targets << endl;

cout << targets << endl;
}


1 change: 0 additions & 1 deletion opennn/batch.h
@@ -42,7 +42,6 @@ struct Batch
dimensions target_dimensions;
Tensor<type, 1> target_tensor;


unique_ptr<ThreadPool> thread_pool;
unique_ptr<ThreadPoolDevice> thread_pool_device;
};
30 changes: 15 additions & 15 deletions opennn/data_set.cpp
@@ -1689,6 +1689,7 @@ void DataSet::set(const filesystem::path& new_data_path,
const bool& new_has_ids,
const DataSet::Codification& new_codification)
{

set_default();

set_data_path(new_data_path);
@@ -1700,9 +1701,9 @@
set_has_ids(new_has_ids);

set_codification(new_codification);

read_csv();

set_default_raw_variables_scalers();

set_default_raw_variables_uses();
@@ -2980,11 +2981,11 @@ void DataSet::print() const
<< "Number of variables: " << variables_number << "\n"
<< "Number of input variables: " << input_variables_number << "\n"
<< "Number of target variables: " << target_variables_bumber << "\n"
<< "Input variables dimensions: ";
<< "Input dimensions: ";

print_vector(get_input_dimensions());

cout << "Target variables dimensions: ";
cout << "Target dimensions: ";

print_vector(get_target_dimensions());

@@ -3866,7 +3867,7 @@ void DataSet::read_csv()

if(columns_number != 0) break;
}

const Index raw_variables_number = has_sample_ids
? columns_number - 1
: columns_number;
@@ -3891,7 +3892,7 @@
samples_number++;
set_default_raw_variables_names();
}

// Rest of lines

while(getline(file, line))
@@ -3912,7 +3913,7 @@

samples_number++;
}

for(Index i = 0; i < raw_variables_number; i++)
if(raw_variables[i].type == RawVariableType::Categorical
&& raw_variables[i].get_categories_number() == 2)
@@ -3921,24 +3922,24 @@
sample_uses.resize(samples_number);

sample_ids.resize(samples_number);

// const Index variables_number = get_variables_number();
const Index variables_number = columns_number;

const vector<vector<Index>> all_variable_indices = get_variable_indices();

data.resize(samples_number, variables_number);
data.setZero();

rows_missing_values_number = 0;

missing_values_number = 0;

raw_variables_missing_values_number.resize(raw_variables_number);
raw_variables_missing_values_number.setZero();

// Fill data

file.clear();
file.seekg(0);

@@ -4050,7 +4051,6 @@ void DataSet::read_csv()
}

sample_index++;

}

file.close();
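
Note: read_csv makes two passes over the file, which is why the hunks above rewind the stream with file.clear() and file.seekg(0) after counting. The pattern in isolation:

    #include <fstream>
    #include <string>

    std::ifstream file("data.csv");   // path is illustrative
    std::string line;

    long long rows = 0;
    while(std::getline(file, line)) rows++;   // pass 1: count samples

    file.clear();    // clear the EOF flag so the stream is reusable
    file.seekg(0);   // rewind to the start of the file

    while(std::getline(file, line))
    {
        // pass 2: tokenize each line and fill the preallocated data matrix
    }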
@@ -4377,11 +4377,11 @@ void DataSet::fix_repeated_names()
}


vector<vector<Index>> DataSet::split_samples(const vector<Index>& sample_indices, const Index& new_batch_size) const
vector<vector<Index>> DataSet::split_samples(const vector<Index>& sample_indices, const Index& new_samples_number) const
{
const Index samples_number = sample_indices.size();

Index batch_size = new_batch_size;
Index batch_size = new_samples_number;

Index batches_number;

@@ -4395,7 +4395,7 @@ vector<vector<Index>> DataSet::split_samples(const vector<Index>& sample_indices
batches_number = samples_number / batch_size;
}

// const Index batches_number = (samples_number + new_batch_size - 1) / new_batch_size; // Round up division
// const Index batches_number = (samples_number + new_samples_number - 1) / new_samples_number; // Round up division

vector<vector<Index>> batches(batches_number);

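Note: the commented-out line above is the round-up alternative to the floor division the function currently uses; the difference is whether a trailing partial batch is kept. For example:

    // With samples_number = 100 and batch_size = 32:
    const Index floor_batches = samples_number/batch_size;                     // 3 batches; last 4 samples dropped
    const Index ceil_batches = (samples_number + batch_size - 1)/batch_size;   // 4 batches; final one holds 4 samples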
7 changes: 6 additions & 1 deletion opennn/data_set.h
@@ -230,7 +230,12 @@ class DataSet
// Set

void set(const Index& = 0, const dimensions& = {}, const dimensions& = {});
void set(const filesystem::path&, const string&, const bool& = true, const bool& = false, const DataSet::Codification& = Codification::UTF8);

void set(const filesystem::path&,
const string&,
const bool& = true,
const bool& = false,
const DataSet::Codification& = Codification::UTF8);

void set(const filesystem::path&);

8 changes: 4 additions & 4 deletions opennn/neural_network.cpp
@@ -433,20 +433,20 @@ void NeuralNetwork::set_image_classification(const dimensions& input_dimensions,
if (input_dimensions.size() != 3)
throw runtime_error("Input dimensions size is not 3.");

const Index complexity_size = complexity_dimensions.size();

add_layer(make_unique<ScalingLayer4D>(input_dimensions));

const Index complexity_size = complexity_dimensions.size();

for (Index i = 0; i < complexity_size; i++)
{
const dimensions kernel_dimensions = { 3, 3, get_output_dimensions()[2], complexity_dimensions[i] };
const dimensions convolution_stride_dimensions = { 1, 1 };
const dimensions stride_dimensions = { 1, 1 };
const ConvolutionalLayer::ConvolutionType convolution_type = ConvolutionalLayer::ConvolutionType::Valid;

add_layer(make_unique<ConvolutionalLayer>(get_output_dimensions(),
kernel_dimensions,
ConvolutionalLayer::ActivationFunction::RectifiedLinear,
convolution_stride_dimensions,
stride_dimensions,
convolution_type,
"convolutional_layer_" + to_string(i+1)));

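Note on the loop above: each convolutional block reads the previous block's channel count through get_output_dimensions()[2] and emits complexity_dimensions[i] channels, always with 3x3 kernels, stride 1, and valid padding. Ignoring any pooling layers the elided rest of the function may add, the shapes chain like this (the input and complexity values are assumed for illustration):

    dimensions output = {32, 32, 3};                      // assumed 32x32 RGB input
    for(const Index channels : dimensions{8, 16, 32})     // assumed complexity_dimensions
    {
        const dimensions kernel = {3, 3, output[2], channels};
        // a valid 3x3, stride-1 convolution trims 2 from each spatial side
        output = {output[0] - 2, output[1] - 2, channels};
    }
    // output is now {26, 26, 32}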