From 7b783a71cc8b8021ec6c3f62e36525286110cffa Mon Sep 17 00:00:00 2001 From: Cheng Gong Date: Mon, 13 May 2024 16:31:03 -0400 Subject: [PATCH 1/2] add parallel FNN --- pinnicle/nn/nn.py | 12 +++++++++++- pinnicle/parameter.py | 3 +++ tests/test_nn.py | 16 ++++++++++++++++ tests/test_pinn.py | 27 ++++++++++++++++++++++++--- 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/pinnicle/nn/nn.py b/pinnicle/nn/nn.py index 7782196..a1283f3 100644 --- a/pinnicle/nn/nn.py +++ b/pinnicle/nn/nn.py @@ -10,7 +10,10 @@ def __init__(self, parameters=NNParameter()): self.parameters = parameters # create new NN - self.net = self.createFNN() + if self.parameters.is_parallel: + self.net = self.createPFNN() + else: + self.net = self.createFNN() # apply transform # by default, use min-max scale for the input @@ -29,6 +32,13 @@ def createFNN(self): """ layer_size = [self.parameters.input_size] + [self.parameters.num_neurons] * self.parameters.num_layers + [self.parameters.output_size] return dde.nn.FNN(layer_size, self.parameters.activation, self.parameters.initializer) + + def createPFNN(self): + """ + create a parallel fully connected neural network + """ + layer_size = [self.parameters.input_size] + [[self.parameters.num_neurons]*self.parameters.output_size] * self.parameters.num_layers + [self.parameters.output_size] + return dde.nn.PFNN(layer_size, self.parameters.activation, self.parameters.initializer) def _add_input_transform(self, func): """ diff --git a/pinnicle/parameter.py b/pinnicle/parameter.py index d028ab2..f140861 100644 --- a/pinnicle/parameter.py +++ b/pinnicle/parameter.py @@ -170,6 +170,9 @@ def set_default(self): self.activation = "tanh" self.initializer = "Glorot uniform" + # parallel neural network + self.is_parallel = False + # scaling parameters self.input_lb = None self.input_ub = None diff --git a/tests/test_nn.py b/tests/test_nn.py index 4da249b..35a6279 100644 --- a/tests/test_nn.py +++ b/tests/test_nn.py @@ -38,3 +38,19 @@ def 
test_output_scale_nn(): x = np.linspace(-1.0, 1.0, 100) assert np.all(p.net._output_transform(0, x) > d.output_lb - d.output_lb*np.finfo(float).eps) assert np.all(p.net._output_transform(0, x) < d.output_ub + d.output_ub*np.finfo(float).eps) + +def test_pfnn(): + hp={} + hp['input_variables'] = ['x','y'] + hp['output_variables'] = ['u', 'v','s'] + hp['num_neurons'] = 4 + hp['num_layers'] = 5 + hp['is_parallel'] = False + d = NNParameter(hp) + p = pinn.nn.FNN(d) + assert len(p.net.layers) == 6 + hp['is_parallel'] = True + d = NNParameter(hp) + p = pinn.nn.FNN(d) + assert len(p.net.layers) == 18 + diff --git a/tests/test_pinn.py b/tests/test_pinn.py index 8d1347f..d320c55 100644 --- a/tests/test_pinn.py +++ b/tests/test_pinn.py @@ -33,7 +33,7 @@ hp["activation"] = "tanh" hp["initializer"] = "Glorot uniform" hp["num_neurons"] = 10 -hp["num_layers"] = 6 +hp["num_layers"] = 4 # data issm = {} @@ -98,7 +98,29 @@ def test_save_and_load_setting(tmp_path): experiment2 = pinn.PINN(loadFrom=tmp_path) assert experiment.params.param_dict == experiment2.params.param_dict -#def test_train(tmp_path): +def test_train(tmp_path): + hp["is_save"] = False + hp["num_collocation_points"] = 100 + issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100} + hp["data"] = {"ISSM": issm} + experiment = pinn.PINN(params=hp) + experiment.compile() + experiment.train() + assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"] + +def test_train_PFNN(tmp_path): + hp["is_parallel"] = True + hp["is_save"] = False + hp["num_collocation_points"] = 100 + issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100} + hp["data"] = {"ISSM": issm} + experiment = pinn.PINN(params=hp) + experiment.compile() + experiment.train() + assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"] + assert len(experiment.model.net.trainable_weights) == 50 + +#def test_save_train(tmp_path): # hp["save_path"] = str(tmp_path) # 
hp["is_save"] = True # hp["num_collocation_points"] = 100 @@ -143,7 +165,6 @@ def test_only_callbacks(tmp_path): assert callbacks is not None assert len(callbacks) == 3 - def test_plot(tmp_path): hp["save_path"] = str(tmp_path) hp["is_save"] = True From 187028b7ef6d428a8d8742264c7293350632757c Mon Sep 17 00:00:00 2001 From: Cheng Gong Date: Mon, 13 May 2024 22:32:04 -0400 Subject: [PATCH 2/2] use different number of neurons per layer --- pinnicle/nn/nn.py | 21 +++++++++++++++++++-- pinnicle/parameter.py | 4 ++++ tests/test_nn.py | 15 +++++++++++++++ tests/test_parameters.py | 4 +++- tests/test_pinn.py | 5 ++++- 5 files changed, 45 insertions(+), 4 deletions(-) diff --git a/pinnicle/nn/nn.py b/pinnicle/nn/nn.py index a1283f3..0bc6c38 100644 --- a/pinnicle/nn/nn.py +++ b/pinnicle/nn/nn.py @@ -30,14 +30,31 @@ def createFNN(self): """ create a fully connected neural network """ - layer_size = [self.parameters.input_size] + [self.parameters.num_neurons] * self.parameters.num_layers + [self.parameters.output_size] + if isinstance(self.parameters.num_neurons, list): + # directly use the given list of num_neurons + layer_size = [self.parameters.input_size] + \ + self.parameters.num_neurons + \ + [self.parameters.output_size] + else: + # repeat num_layers times + layer_size = [self.parameters.input_size] + \ + [self.parameters.num_neurons] * self.parameters.num_layers + \ + [self.parameters.output_size] + return dde.nn.FNN(layer_size, self.parameters.activation, self.parameters.initializer) def createPFNN(self): """ create a parallel fully connected neural network """ - layer_size = [self.parameters.input_size] + [[self.parameters.num_neurons]*self.parameters.output_size] * self.parameters.num_layers + [self.parameters.output_size] + if isinstance(self.parameters.num_neurons, list): + layer_size = [self.parameters.input_size] + \ + [[n]*self.parameters.output_size for n in self.parameters.num_neurons] + \ + [self.parameters.output_size] + else: + layer_size = 
[self.parameters.input_size] + \ + [[self.parameters.num_neurons]*self.parameters.output_size] * self.parameters.num_layers + \ + [self.parameters.output_size] return dde.nn.PFNN(layer_size, self.parameters.activation, self.parameters.initializer) def _add_input_transform(self, func): diff --git a/pinnicle/parameter.py b/pinnicle/parameter.py index f140861..9cfb5cd 100644 --- a/pinnicle/parameter.py +++ b/pinnicle/parameter.py @@ -210,6 +210,10 @@ def set_parameters(self, pdict: dict): super().set_parameters(pdict) self.input_size = len(self.input_variables) self.output_size = len(self.output_variables) + # num_neurons is a list + if isinstance(self.num_neurons, list): + self.num_layers = len(self.num_neurons) + class PhysicsParameter(ParameterBase): diff --git a/tests/test_nn.py b/tests/test_nn.py index 35a6279..3c270a0 100644 --- a/tests/test_nn.py +++ b/tests/test_nn.py @@ -54,3 +54,18 @@ def test_pfnn(): p = pinn.nn.FNN(d) assert len(p.net.layers) == 18 +def test_pfnn_list_neuron(): + hp={} + hp['input_variables'] = ['x','y'] + hp['output_variables'] = ['u', 'v','s'] + hp['num_neurons'] = [3,4,5] + hp['num_layers'] = 5 + hp['is_parallel'] = False + d = NNParameter(hp) + p = pinn.nn.FNN(d) + assert len(p.net.layers) == 4 + hp['is_parallel'] = True + d = NNParameter(hp) + p = pinn.nn.FNN(d) + assert len(p.net.layers) == 12 + diff --git a/tests/test_parameters.py b/tests/test_parameters.py index 91d7516..c9ae5ae 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -59,7 +59,9 @@ def test_nn_parameter(): d.output_lb = 1 d.output_ub = 10 assert d.is_output_scaling() - + d = NNParameter({"num_neurons":[1,2,3]}) + assert d.num_layers == 3 + def test_parameters(): p = Parameters() domain = DomainParameter() @@ -114,12 +114,15 @@ def test_train_PFNN(tmp_path): hp["is_save"] = False hp["num_collocation_points"] = 100
issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100} + hp["num_neurons"] = [4,10]; hp["data"] = {"ISSM": issm} experiment = pinn.PINN(params=hp) experiment.compile() experiment.train() assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"] - assert len(experiment.model.net.trainable_weights) == 50 + assert experiment.params.nn.num_layers == 2 + assert len(experiment.model.net.layers) == 5*(2+1) + assert len(experiment.model.net.trainable_weights) == 30 #def test_save_train(tmp_path): # hp["save_path"] = str(tmp_path)