diff --git a/README.md b/README.md
index 43e0c11..279411d 100644
--- a/README.md
+++ b/README.md
@@ -125,7 +125,7 @@ The Simulation object import a YAML file with all the problem definitions. An ex
   num_hidden_layers: 4
   num_neurons_per_layer: 200
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   fourier_features: true
   weight_factorization: false
 
@@ -134,7 +134,7 @@ The Simulation object import a YAML file with all the problem definitions. An ex
   num_hidden_layers: 4
   num_neurons_per_layer: 200
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   fourier_features: true
   weight_factorization: false
 ```
diff --git a/tests/simulations_yaml/test_arg.yaml b/tests/simulations_yaml/test_arg.yaml
index 3cc0934..f3de4b2 100644
--- a/tests/simulations_yaml/test_arg.yaml
+++ b/tests/simulations_yaml/test_arg.yaml
@@ -40,7 +40,7 @@ hyperparameters_in:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
@@ -51,7 +51,7 @@ hyperparameters_out:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
diff --git a/tests/simulations_yaml/test_born_ion.yaml b/tests/simulations_yaml/test_born_ion.yaml
index a0e5e40..20ada10 100644
--- a/tests/simulations_yaml/test_born_ion.yaml
+++ b/tests/simulations_yaml/test_born_ion.yaml
@@ -40,7 +40,7 @@ hyperparameters_in:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
@@ -51,7 +51,7 @@ hyperparameters_out:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
diff --git a/tests/simulations_yaml/test_methanol.yaml b/tests/simulations_yaml/test_methanol.yaml
index 24c0b83..b1dda0a 100644
--- a/tests/simulations_yaml/test_methanol.yaml
+++ b/tests/simulations_yaml/test_methanol.yaml
@@ -40,7 +40,7 @@ hyperparameters_in:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
@@ -51,7 +51,7 @@ hyperparameters_out:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
diff --git a/tests/simulations_yaml/test_sphere_+1-1.yaml b/tests/simulations_yaml/test_sphere_+1-1.yaml
index d3eec1d..2d39141 100644
--- a/tests/simulations_yaml/test_sphere_+1-1.yaml
+++ b/tests/simulations_yaml/test_sphere_+1-1.yaml
@@ -40,7 +40,7 @@ hyperparameters_in:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
@@ -51,7 +51,7 @@ hyperparameters_out:
   num_neurons_per_layer: 20
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: true
diff --git a/xppbe/NN/NeuralNet.py b/xppbe/NN/NeuralNet.py
index 58965c0..f5e6948 100644
--- a/xppbe/NN/NeuralNet.py
+++ b/xppbe/NN/NeuralNet.py
@@ -54,7 +54,7 @@ def __init__(self,
                  num_neurons_per_layer=20,
                  num_hidden_blocks=2,
                  activation='tanh',
-                 adaptative_activation=False,
+                 adaptive_activation=False,
                  kernel_initializer='glorot_normal',
                  architecture_Net='FCNN',
                  fourier_features=False,
@@ -75,7 +75,7 @@ def __init__(self,
         self.num_hidden_blocks = num_hidden_blocks
 
         self.activation = activation
-        self.adaptative_activation = adaptative_activation
+        self.adaptive_activation = adaptive_activation
         self.kernel_initializer = kernel_initializer
 
         self.architecture_Net = architecture_Net
@@ -143,7 +143,7 @@ def create_FCNN(self):
             layer = self.Dense_Layer(self.num_neurons_per_layer,
                                      activation=CustomActivation(units=self.num_neurons_per_layer,
                                                                  activation=self.activation,
-                                                                 adaptative_activation=self.adaptative_activation),
+                                                                 adaptive_activation=self.adaptive_activation),
                                      kernel_initializer=self.kernel_initializer,
                                      name=f'layer_{i}')
             self.hidden_layers.append(layer)
@@ -154,13 +154,13 @@ def create_ModMLP(self):
         self.U = self.Dense_Layer(self.num_neurons_per_layer,
                                   activation=CustomActivation(units=self.num_neurons_per_layer,
                                                               activation=self.activation,
-                                                              adaptative_activation=self.adaptative_activation),
+                                                              adaptive_activation=self.adaptive_activation),
                                   kernel_initializer=self.kernel_initializer,
                                   name=f'layer_u')
         self.V = self.Dense_Layer(self.num_neurons_per_layer,
                                   activation=CustomActivation(units=self.num_neurons_per_layer,
                                                               activation=self.activation,
-                                                              adaptative_activation=self.adaptative_activation),
+                                                              adaptive_activation=self.adaptive_activation),
                                   kernel_initializer=self.kernel_initializer,
                                   name=f'layer_v')
         self.call_architecture = self.call_ModMLP
@@ -170,7 +170,7 @@ def create_ResNet(self):
         self.first = self.Dense_Layer(self.num_neurons_per_layer,
                                       activation=CustomActivation(units=self.num_neurons_per_layer,
                                                                   activation=self.activation,
-                                                                  adaptative_activation=self.adaptative_activation),
+                                                                  adaptive_activation=self.adaptive_activation),
                                       kernel_initializer=self.kernel_initializer,
                                       name=f'layer_0')
         self.hidden_blocks = list()
@@ -180,7 +180,7 @@ def create_ResNet(self):
             block.add(self.Dense_Layer(self.num_neurons_per_layer,
                                        activation=CustomActivation(units=self.num_neurons_per_layer,
                                                                    activation=self.activation,
-                                                                   adaptative_activation=self.adaptative_activation),
+                                                                   adaptive_activation=self.adaptive_activation),
                                        kernel_initializer=self.kernel_initializer))
             block.add(self.Dense_Layer(self.num_neurons_per_layer,
                                        activation=None,
@@ -188,13 +188,13 @@ def create_ResNet(self):
             self.hidden_blocks.append(block)
             activation_layer = tf.keras.layers.Activation(activation=CustomActivation(units=self.num_neurons_per_layer,
                                                                                       activation=self.activation,
-                                                                                      adaptative_activation=self.adaptative_activation))
+                                                                                      adaptive_activation=self.adaptive_activation))
             self.hidden_blocks_activations.append(activation_layer)
 
         self.last = self.Dense_Layer(self.num_neurons_per_layer,
                                      activation=CustomActivation(units=self.num_neurons_per_layer,
                                                                  activation=self.activation,
-                                                                 adaptative_activation=self.adaptative_activation),
+                                                                 adaptive_activation=self.adaptive_activation),
                                      kernel_initializer=self.kernel_initializer,
                                      name=f'layer_1')
         self.call_architecture = self.call_ResNet
@@ -235,17 +235,17 @@ def call_ResNet(self, X):
 
 
 class CustomActivation(tf.keras.layers.Layer):
-    def __init__(self, units=1, activation='tanh', adaptative_activation=False, **kwargs):
+    def __init__(self, units=1, activation='tanh', adaptive_activation=False, **kwargs):
         super(CustomActivation, self).__init__(**kwargs)
         self.units = units
         self.activation = activation
-        self.adaptative_activation = adaptative_activation
+        self.adaptive_activation = adaptive_activation
 
     def build(self, input_shape):
         self.a = self.add_weight(name='a',
                                  shape=(self.units,),
                                  initializer='ones',
-                                 trainable=self.adaptative_activation)
+                                 trainable=self.adaptive_activation)
 
     def call(self, inputs):
         a_expanded = tf.expand_dims(self.a, axis=0)
diff --git a/xppbe/Simulation.yaml b/xppbe/Simulation.yaml
index f53e682..59cb63d 100644
--- a/xppbe/Simulation.yaml
+++ b/xppbe/Simulation.yaml
@@ -66,7 +66,7 @@ hyperparameters_in:
   num_neurons_per_layer: 200
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: false
@@ -79,7 +79,7 @@ hyperparameters_out:
   num_neurons_per_layer: 200
   output_dim: 1
   activation: tanh
-  adaptative_activation: true
+  adaptive_activation: true
   architecture_Net: FCNN
   fourier_features: true
   weight_factorization: false
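
Note on the renamed flag: `adaptive_activation` controls whether the per-neuron parameter `a` in `CustomActivation` is trainable (it is created with `initializer='ones'` and `trainable=self.adaptive_activation`). Below is a minimal, self-contained sketch of the layer under the new name. The diff cuts off after `a_expanded = tf.expand_dims(self.a, axis=0)`, so the `a * inputs` scaling and the final application of the base activation in `call` are assumptions, not the repository's exact code:

```python
import tensorflow as tf

class CustomActivation(tf.keras.layers.Layer):
    """Activation with an optional trainable per-neuron slope `a`."""

    def __init__(self, units=1, activation='tanh', adaptive_activation=False, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = activation
        self.adaptive_activation = adaptive_activation

    def build(self, input_shape):
        # `a` starts at 1 and is only updated by the optimizer
        # when adaptive_activation=True (matches the diff).
        self.a = self.add_weight(name='a',
                                 shape=(self.units,),
                                 initializer='ones',
                                 trainable=self.adaptive_activation)

    def call(self, inputs):
        a_expanded = tf.expand_dims(self.a, axis=0)  # broadcast over the batch
        # Assumed form: scale pre-activations by `a`, then apply the base activation.
        return tf.keras.activations.get(self.activation)(a_expanded * inputs)

# Usage: trainable slopes, matching `adaptive_activation: true` in the YAML files.
act = CustomActivation(units=20, activation='tanh', adaptive_activation=True)
x = tf.random.normal((8, 20))
y = act(x)                          # builds `a` with shape (20,)
print(len(act.trainable_weights))   # 1 -> `a` is trainable
```

With `adaptive_activation: true` (as in the YAML files above), the optimizer can adjust each neuron's slope; with `false`, `a` stays fixed at 1 and the layer reduces to the plain base activation.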