From 968c1a16e2b850dd507eecd3e73d39b094505a27 Mon Sep 17 00:00:00 2001 From: FilippoOlivo Date: Thu, 31 Oct 2024 14:26:20 +0100 Subject: [PATCH] Bug fix in LabelTensor, Dataset and DataLoader and codacy warning correction --- pina/condition/data_condition.py | 2 +- pina/condition/domain_equation_condition.py | 3 +- pina/condition/input_equation_condition.py | 3 +- pina/condition/input_output_condition.py | 3 +- pina/data/base_dataset.py | 9 +- pina/data/data_module.py | 21 +-- pina/data/pina_batch.py | 16 +- pina/data/pina_dataloader.py | 52 ++++-- pina/data/pina_subset.py | 19 ++- pina/label_tensor.py | 112 ++++++++----- pina/solvers/pinns/basepinn.py | 84 +++++++--- pina/solvers/pinns/pinn.py | 21 +-- pina/solvers/solver.py | 6 +- pina/solvers/supervised.py | 6 +- pina/trainer.py | 24 +-- tests/test_dataset.py | 70 ++++---- .../test_label_tensor/test_label_tensor_01.py | 2 +- tests/test_solvers/test_supervised_solver.py | 2 +- tutorials/tutorial5/tutorial.ipynb | 154 ++++++++---------- 19 files changed, 339 insertions(+), 270 deletions(-) diff --git a/pina/condition/data_condition.py b/pina/condition/data_condition.py index c6777231..05c543eb 100644 --- a/pina/condition/data_condition.py +++ b/pina/condition/data_condition.py @@ -15,6 +15,7 @@ class DataConditionInterface(ConditionInterface): """ __slots__ = ["input_points", "conditional_variables"] + condition_type = ['unsupervised'] def __init__(self, input_points, conditional_variables=None): """ @@ -23,7 +24,6 @@ def __init__(self, input_points, conditional_variables=None): super().__init__() self.input_points = input_points self.conditional_variables = conditional_variables - self._condition_type = 'unsupervised' def __setattr__(self, key, value): if (key == 'input_points') or (key == 'conditional_variables'): diff --git a/pina/condition/domain_equation_condition.py b/pina/condition/domain_equation_condition.py index 58dca70b..53e07621 100644 --- a/pina/condition/domain_equation_condition.py +++ b/pina/condition/domain_equation_condition.py @@ -13,7 +13,7 @@ class DomainEquationCondition(ConditionInterface): """ __slots__ = ["domain", "equation"] - + condition_type = ['physics'] def __init__(self, domain, equation): """ TODO @@ -21,7 +21,6 @@ def __init__(self, domain, equation): super().__init__() self.domain = domain self.equation = equation - self._condition_type = 'physics' def __setattr__(self, key, value): if key == 'domain': diff --git a/pina/condition/input_equation_condition.py b/pina/condition/input_equation_condition.py index bf05130c..2a7f4647 100644 --- a/pina/condition/input_equation_condition.py +++ b/pina/condition/input_equation_condition.py @@ -14,7 +14,7 @@ class InputPointsEquationCondition(ConditionInterface): """ __slots__ = ["input_points", "equation"] - + condition_type = ['physics'] def __init__(self, input_points, equation): """ TODO @@ -22,7 +22,6 @@ def __init__(self, input_points, equation): super().__init__() self.input_points = input_points self.equation = equation - self._condition_type = 'physics' def __setattr__(self, key, value): if key == 'input_points': diff --git a/pina/condition/input_output_condition.py b/pina/condition/input_output_condition.py index 08ed21d9..e9c34bea 100644 --- a/pina/condition/input_output_condition.py +++ b/pina/condition/input_output_condition.py @@ -13,7 +13,7 @@ class InputOutputPointsCondition(ConditionInterface): """ __slots__ = ["input_points", "output_points"] - + condition_type = ['supervised'] def __init__(self, input_points, output_points): """ TODO @@ -21,7 
+21,6 @@ def __init__(self, input_points, output_points): super().__init__() self.input_points = input_points self.output_points = output_points - self._condition_type = ['supervised', 'physics'] def __setattr__(self, key, value): if (key == 'input_points') or (key == 'output_points'): diff --git a/pina/data/base_dataset.py b/pina/data/base_dataset.py index 2c28ba30..d05784f8 100644 --- a/pina/data/base_dataset.py +++ b/pina/data/base_dataset.py @@ -5,7 +5,6 @@ import logging from torch.utils.data import Dataset - from ..label_tensor import LabelTensor @@ -109,14 +108,14 @@ def initialize(self): already filled """ logging.debug(f'Initialize dataset {self.__class__.__name__}') - if self.num_el_per_condition: self.condition_indices = torch.cat([ - torch.tensor([i] * self.num_el_per_condition[i], - dtype=torch.uint8) + torch.tensor( + [self.conditions_idx[i]] * self.num_el_per_condition[i], + dtype=torch.uint8) for i in range(len(self.num_el_per_condition)) ], - dim=0) + dim=0) for slot in self.__slots__: current_attribute = getattr(self, slot) if all(isinstance(a, LabelTensor) for a in current_attribute): diff --git a/pina/data/data_module.py b/pina/data/data_module.py index bd117b54..ea6a802c 100644 --- a/pina/data/data_module.py +++ b/pina/data/data_module.py @@ -23,8 +23,8 @@ def __init__(self, problem, device, train_size=.7, - test_size=.1, - val_size=.2, + test_size=.2, + val_size=.1, predict_size=0., batch_size=None, shuffle=True, @@ -61,28 +61,30 @@ def __init__(self, if train_size > 0: self.split_names.append('train') self.split_length.append(train_size) - self.loader_functions['train_dataloader'] = lambda: PinaDataLoader( - self.splits['train'], self.batch_size, self.condition_names) + self.loader_functions['train_dataloader'] = lambda \ + x: PinaDataLoader(self.splits['train'], self.batch_size, + self.condition_names) if test_size > 0: self.split_length.append(test_size) self.split_names.append('test') - self.loader_functions['test_dataloader'] = lambda: PinaDataLoader( + self.loader_functions['test_dataloader'] = lambda x: PinaDataLoader( self.splits['test'], self.batch_size, self.condition_names) if val_size > 0: self.split_length.append(val_size) self.split_names.append('val') - self.loader_functions['val_dataloader'] = lambda: PinaDataLoader( + self.loader_functions['val_dataloader'] = lambda x: PinaDataLoader( self.splits['val'], self.batch_size, self.condition_names) if predict_size > 0: self.split_length.append(predict_size) self.split_names.append('predict') - self.loader_functions['predict_dataloader'] = lambda: PinaDataLoader( + self.loader_functions[ + 'predict_dataloader'] = lambda x: PinaDataLoader( self.splits['predict'], self.batch_size, self.condition_names) self.splits = {k: {} for k in self.split_names} self.shuffle = shuffle for k, v in self.loader_functions.items(): - setattr(self, k, v) + setattr(self, k, v.__get__(self, PinaDataModule)) def prepare_data(self): if self.datasets is None: @@ -140,12 +142,11 @@ def dataset_split(dataset, lengths, seed=None, shuffle=True): indices = torch.randperm(sum(lengths)) dataset.apply_shuffle(indices) - indices = torch.arange(0, sum(lengths), 1, dtype=torch.uint8).tolist() offsets = [ sum(lengths[:i]) if i > 0 else 0 for i in range(len(lengths)) ] return [ - PinaSubset(dataset, indices[offset:offset + length]) + PinaSubset(dataset, slice(offset, offset + length)) for offset, length in zip(offsets, lengths) ] diff --git a/pina/data/pina_batch.py b/pina/data/pina_batch.py index c5d1b61d..79c076da 100644 --- 
a/pina/data/pina_batch.py
+++ b/pina/data/pina_batch.py
@@ -27,21 +27,27 @@ def __len__(self):
         :rtype: int
         """
         length = 0
-        for dataset in dir(self):
+        for dataset in self.attributes:
             attribute = getattr(self, dataset)
-            if isinstance(attribute, list):
-                length += len(getattr(self, dataset))
+            length += len(attribute)
         return length
 
     def __getattribute__(self, item):
         if item in super().__getattribute__('attributes'):
             dataset = super().__getattribute__(item)
             index = super().__getattribute__(item + '_idx')
-            return PinaSubset(dataset.dataset, dataset.indices[index])
+            if isinstance(dataset, PinaSubset):
+                dataset_index = dataset.indices
+                if isinstance(dataset_index, slice):
+                    index = slice(dataset_index.start + index.start,
+                                  min(dataset_index.start + index.stop,
+                                      dataset_index.stop))
+            return PinaSubset(dataset.dataset, index,
+                              require_grad=self.require_grad)
         return super().__getattribute__(item)
 
     def __getattr__(self, item):
         if item == 'data' and len(self.attributes) == 1:
             item = self.attributes[0]
-            return super().__getattribute__(item)
+            return self.__getattribute__(item)
         raise AttributeError(f"'Batch' object has no attribute '{item}'")
diff --git a/pina/data/pina_dataloader.py b/pina/data/pina_dataloader.py
index e2d3fb76..a28ca6c6 100644
--- a/pina/data/pina_dataloader.py
+++ b/pina/data/pina_dataloader.py
@@ -2,6 +2,7 @@
 This module is used to create an iterable object used during training
 """
 import math
+
 from .pina_batch import Batch
 
 
@@ -26,6 +27,7 @@ def __init__(self, dataset_dict, batch_size, condition_names) -> None:
         """
         self.condition_names = condition_names
         self.dataset_dict = dataset_dict
+        self.batch_size = batch_size
         self._init_batches(batch_size)
 
     def _init_batches(self, batch_size=None):
@@ -36,20 +38,46 @@ def _init_batches(self, batch_size=None):
         n_elements = sum(len(v) for v in self.dataset_dict.values())
         if batch_size is None:
             batch_size = n_elements
-        indexes_dict = {}
+            self.batch_size = n_elements
         n_batches = int(math.ceil(n_elements / batch_size))
-        for k, v in self.dataset_dict.items():
-            if n_batches != 1:
-                indexes_dict[k] = math.floor(len(v) / (n_batches - 1))
-            else:
-                indexes_dict[k] = len(v)
-        for i in range(n_batches):
-            temp_dict = {}
-            for k, v in indexes_dict.items():
-                if i != n_batches - 1:
-                    temp_dict[k] = slice(i * v, (i + 1) * v)
+        indexes_dict = {
+            k: math.floor(len(v) / n_batches) if n_batches != 1 else len(v) for
+            k, v in self.dataset_dict.items()}
+
+        dataset_names = list(self.dataset_dict.keys())
+        num_el_per_batch = [{i: indexes_dict[i] for i in dataset_names} for _
+                            in range(n_batches - 1)] + [
+                               {i: 0 for i in dataset_names}]
+        remainders = {
+            i: len(self.dataset_dict[i]) - indexes_dict[i] * (n_batches - 1) for
+            i in dataset_names}
+        dataset_names = iter(dataset_names)
+        name = next(dataset_names, None)
+        for batch in num_el_per_batch:
+            tot_num_el = sum(batch.values())
+            batch_remainder = batch_size - tot_num_el
+            for _ in range(batch_remainder):
+                if name is None:
+                    break
+                if remainders[name] > 0:
+                    batch[name] += 1
+                    remainders[name] -= 1
                 else:
-                    temp_dict[k] = slice(i * v, len(self.dataset_dict[k]))
+                    name = next(dataset_names, None)
+                    if name is None:
+                        break
+                    batch[name] += 1
+                    remainders[name] -= 1
+
+        remainders, dataset_names, indexes_dict = None, None, None  # free memory
+        actual_indices = {k: 0 for k in self.dataset_dict.keys()}
+        for batch in num_el_per_batch:
+            temp_dict = {}
+            total_length = 0
+            for k, v in batch.items():
+                temp_dict[k] = slice(actual_indices[k], actual_indices[k] + v)
+                actual_indices[k] = actual_indices[k] + 
v + total_length += v self.batches.append( Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict)) diff --git a/pina/data/pina_subset.py b/pina/data/pina_subset.py index 275541e9..b5b74a68 100644 --- a/pina/data/pina_subset.py +++ b/pina/data/pina_subset.py @@ -11,7 +11,7 @@ class PinaSubset: """ __slots__ = ['dataset', 'indices', 'require_grad'] - def __init__(self, dataset, indices, require_grad=True): + def __init__(self, dataset, indices, require_grad=False): """ TODO """ @@ -23,14 +23,27 @@ def __len__(self): """ TODO """ + if isinstance(self.indices, slice): + return self.indices.stop - self.indices.start return len(self.indices) def __getattr__(self, name): tensor = self.dataset.__getattribute__(name) if isinstance(tensor, (LabelTensor, Tensor)): - tensor = tensor[[self.indices]].to(self.dataset.device) + if isinstance(self.indices, slice): + tensor = tensor[self.indices] + if (tensor.device != self.dataset.device + and tensor.dtype == float32): + tensor = tensor.to(self.dataset.device) + elif isinstance(self.indices, list): + tensor = tensor[[self.indices]].to(self.dataset.device) + else: + raise ValueError(f"Indices type {type(self.indices)} not " + f"supported") return tensor.requires_grad_( self.require_grad) if tensor.dtype == float32 else tensor if isinstance(tensor, list): - return [tensor[i] for i in self.indices] + if isinstance(self.indices, list): + return [tensor[i] for i in self.indices] + return tensor[self.indices] raise AttributeError(f"No attribute named {name}") diff --git a/pina/label_tensor.py b/pina/label_tensor.py index 719975c5..ccf7aeee 100644 --- a/pina/label_tensor.py +++ b/pina/label_tensor.py @@ -2,6 +2,9 @@ from copy import copy, deepcopy import torch from torch import Tensor +import warnings + +full_labels = True def issubset(a, b): @@ -20,7 +23,9 @@ class LabelTensor(torch.Tensor): @staticmethod def __new__(cls, x, labels, *args, **kwargs): + full = kwargs.pop("full", full_labels) if isinstance(x, LabelTensor): + x.full = full return x else: return super().__new__(cls, x, *args, **kwargs) @@ -41,7 +46,7 @@ def __init__(self, x, labels, **kwargs): """ self.dim_names = None - self.full = kwargs.get('full', True) + self.full = kwargs.get('full', full_labels) self.labels = labels @classmethod @@ -53,7 +58,7 @@ def __internal_init__(cls, **kwargs): lt = cls.__new__(cls, x, labels, *args, **kwargs) lt._labels = labels - lt.full = kwargs.get('full', True) + lt.full = kwargs.get('full', full_labels) lt.dim_names = dim_names return lt @@ -124,13 +129,12 @@ def _init_labels_from_dict(self, labels): does not match with tensor shape """ tensor_shape = self.shape - if hasattr(self, 'full') and self.full: labels = { i: labels[i] if i in labels else { - 'name': i + 'name': i, 'dof': range(tensor_shape[i]) } - for i in labels.keys() + for i in range(len(tensor_shape)) } for k, v in labels.items(): # Init labels from str @@ -197,7 +201,6 @@ def extract(self, labels_to_extract): stored_keys = labels.keys() dim_names = self.dim_names ndim = len(super().shape) - # Convert tuple/list to dict if isinstance(labels_to_extract, (tuple, list)): if not ndim - 1 in stored_keys: @@ -215,7 +218,6 @@ def extract(self, labels_to_extract): # Initialize list used to perform extraction extractor = [slice(None) for _ in range(ndim)] - # Loop over labels_to_extract dict for k, v in labels_to_extract.items(): @@ -227,12 +229,11 @@ def extract(self, labels_to_extract): dim_labels = labels[idx_dim]['dof'] v = [v] if isinstance(v, (int, str)) else v - if not isinstance(v, range): 
extractor[idx_dim] = [dim_labels.index(i) for i in v] if len(v) > 1 else slice( - dim_labels.index(v[0]), - dim_labels.index(v[0]) + 1) + dim_labels.index(v[0]), + dim_labels.index(v[0]) + 1) else: extractor[idx_dim] = slice(v.start, v.stop) @@ -274,31 +275,33 @@ def cat(tensors, dim=0): return tensors[0] # Perform cat on tensors new_tensor = torch.cat(tensors, dim=dim) - + new_tensor_shape = new_tensor.shape # Update labels - labels = LabelTensor.__create_labels_cat(tensors, dim) + labels = LabelTensor.__create_labels_cat(tensors, dim, new_tensor_shape) return LabelTensor.__internal_init__(new_tensor, labels, tensors[0].dim_names) @staticmethod - def __create_labels_cat(tensors, dim): + def __create_labels_cat(tensors, dim, tensor_shape): # Check if names and dof of the labels are the same in all dimensions # except in dim stored_labels = [tensor.stored_labels for tensor in tensors] - # check if: # - labels dict have same keys # - all labels are the same expect for dimension dim - if not all( - all(stored_labels[i][k] == stored_labels[0][k] - for i in range(len(stored_labels))) - for k in stored_labels[0].keys() if k != dim): + if not all(all(stored_labels[i][k] == stored_labels[0][k] + for i in range(len(stored_labels))) + for k in stored_labels[0].keys() if k != dim): raise RuntimeError('tensors must have the same shape and dof') labels = {k: copy(v) for k, v in tensors[0].stored_labels.items()} if dim in labels.keys(): - last_dim_dof = [i for j in stored_labels for i in j[dim]['dof']] + labels_list = [j[dim]['dof'] for j in stored_labels] + if all(isinstance(j, range) for j in labels_list): + last_dim_dof = range(tensor_shape[dim]) + else: + last_dim_dof = [i for j in labels_list for i in j] labels[dim]['dof'] = last_dim_dof return labels @@ -316,10 +319,9 @@ def to(self, *args, **kwargs): Performs Tensor dtype and/or device conversion. For more details, see :meth:`torch.Tensor.to`. """ - tmp = super().to(*args, **kwargs) - new = self.__class__.clone(self) - new.data = tmp.data - return new + lt = super().to(*args, **kwargs) + return LabelTensor.__internal_init__(lt, + self.stored_labels, self.dim_names) def clone(self, *args, **kwargs): """ @@ -329,7 +331,8 @@ def clone(self, *args, **kwargs): :return: A copy of the tensor. 
:rtype: LabelTensor """ - labels = {k: copy(v) for k, v in self._labels.items()} + labels = {k: {sub_k: copy(sub_v) for sub_k, sub_v in v.items()} for k, v + in self.stored_labels.items()} out = LabelTensor(super().clone(*args, **kwargs), labels) return out @@ -402,11 +405,13 @@ def __getitem__(self, index): :param index: :return: """ - if isinstance(index, - str) or (isinstance(index, (tuple, list)) - and all(isinstance(a, str) for a in index)): + if isinstance(index, str) or (isinstance(index, (tuple, list)) + and all( + isinstance(a, str) for a in index)): return self.extract(index) + if isinstance(index, torch.Tensor) and index.dtype == torch.bool: + index = [index.nonzero().squeeze()] selected_lt = super().__getitem__(index) if isinstance(index, (int, slice)): @@ -415,15 +420,22 @@ def __getitem__(self, index): if index[0] == Ellipsis: index = [slice(None)] * (self.ndim - 1) + [index[1]] - if hasattr(self, "labels"): - labels = {k: copy(v) for k, v in self.stored_labels.items()} + try: + stored_labels = self.stored_labels + labels = {} for j, idx in enumerate(index): - if isinstance(idx, int): + if isinstance(idx, int) or ( + isinstance(idx, torch.Tensor) and idx.ndim == 0): selected_lt = selected_lt.unsqueeze(j) - if j in labels.keys() and idx != slice(None): - self._update_single_label(labels, labels, idx, j) + if j in self.stored_labels.keys() and idx != slice(None): + self._update_single_label(stored_labels, labels, idx, j) + labels.update( + {k: {sub_k: copy(sub_v) for sub_k, sub_v in v.items()} for k, v + in stored_labels.items() if k not in labels}) selected_lt = LabelTensor.__internal_init__(selected_lt, labels, self.dim_names) + except AttributeError: + warnings.warn('No attribute labels in LabelTensor') return selected_lt @staticmethod @@ -436,16 +448,25 @@ def _update_single_label(old_labels, to_update_labels, index, dim): :param dim: label index :return: """ + old_dof = old_labels[dim]['dof'] - if not isinstance( - index, - (int, slice)) and len(index) == len(old_dof) and isinstance( - old_dof, range): + if isinstance(index, torch.Tensor) and index.ndim == 0: + index = int(index) + if (not isinstance( + index, (int, slice)) and len(index) == len(old_dof) and + isinstance(old_dof, range)): return + if isinstance(index, torch.Tensor): - index = index.nonzero( - as_tuple=True - )[0] if index.dtype == torch.bool else index.tolist() + if isinstance(old_dof, range): + to_update_labels.update({ + dim: { + 'dof': index.tolist(), + 'name': old_labels[dim]['name'] + } + }) + return + index = index.tolist() if isinstance(index, list): to_update_labels.update({ dim: { @@ -453,12 +474,13 @@ def _update_single_label(old_labels, to_update_labels, index, dim): 'name': old_labels[dim]['name'] } }) - else: - to_update_labels.update( - {dim: { - 'dof': old_dof[index], - 'name': old_labels[dim]['name'] - }}) + return + to_update_labels.update( + {dim: { + 'dof': old_dof[index] if isinstance(old_dof[index], + (list, range)) else [old_dof[index]], + 'name': old_labels[dim]['name'] + }}) def sort_labels(self, dim=None): diff --git a/pina/solvers/pinns/basepinn.py b/pina/solvers/pinns/basepinn.py index 543f823f..f886b13c 100644 --- a/pina/solvers/pinns/basepinn.py +++ b/pina/solvers/pinns/basepinn.py @@ -1,14 +1,15 @@ """ Module for PINN """ -import sys from abc import ABCMeta, abstractmethod import torch - -from ...solvers.solver import SolverInterface -from pina.utils import check_consistency -from pina.loss.loss_interface import LossInterface -from pina.problem import InverseProblem from 
torch.nn.modules.loss import _Loss
+from ...condition import InputOutputPointsCondition
+from ...solvers.solver import SolverInterface
+from ...utils import check_consistency
+from ...loss.loss_interface import LossInterface
+from ...problem import InverseProblem, AbstractProblem
+from ...condition import DomainEquationCondition
+from ...optim import TorchOptimizer, TorchScheduler
 
 torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
@@ -25,13 +26,14 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
     to the user to choose which problem the implemented solver inheriting
     from this class is suitable for.
     """
-
+    accepted_condition_types = [DomainEquationCondition.condition_type[0],
+                                InputOutputPointsCondition.condition_type[0]]
     def __init__(
         self,
         models,
         problem,
         optimizers,
-        optimizers_kwargs,
+        schedulers,
         extra_features,
         loss,
     ):
@@ -53,11 +55,20 @@ def __init__(
         :param torch.nn.Module loss: The loss function used as minimizer,
             default :class:`torch.nn.MSELoss`.
         """
+        if optimizers is None:
+            optimizers = TorchOptimizer(torch.optim.Adam, lr=0.001)
+
+        if schedulers is None:
+            schedulers = TorchScheduler(torch.optim.lr_scheduler.ConstantLR)
+
+        if loss is None:
+            loss = torch.nn.MSELoss()
+
         super().__init__(
             models=models,
             problem=problem,
             optimizers=optimizers,
-            optimizers_kwargs=optimizers_kwargs,
+            schedulers=schedulers,
             extra_features=extra_features,
         )
 
@@ -85,6 +96,10 @@ def __init__(
         # variable will be stored with name = self.__logged_metric
         self.__logged_metric = None
 
+        self._model = self._pina_models[0]
+        self._optimizer = self._pina_optimizers[0]
+        self._scheduler = self._pina_schedulers[0]
+
     def training_step(self, batch, _):
         """
         The Physics Informed Solver Training Step. This function takes care
@@ -100,28 +115,44 @@ def training_step(self, batch, _):
         """
         condition_losses = []
-        condition_idx = batch["condition"]
-        for condition_id in range(condition_idx.min(), condition_idx.max() + 1):
+        physics = batch.physics
+        if hasattr(batch, 'supervised'):
+            supervised = batch.supervised
+            condition_idx = supervised.condition_indices
+        else:
+            condition_idx = torch.tensor([])
+
+        for condition_id in torch.unique(condition_idx).tolist():
+            condition_name = self._dataloader.condition_names[condition_id]
+            condition = self.problem.conditions[condition_name]
+            self.__logged_metric = condition_name
+            pts = batch.supervised.input_points
+            out = batch.supervised.output_points
+            output_pts = out[condition_idx == condition_id]
+            input_pts = pts[condition_idx == condition_id]
+
+            input_pts.labels = pts.labels
+            output_pts.labels = out.labels
+
+            loss = self.loss_data(input_points=input_pts, output_points=output_pts)
+            loss = loss.as_subclass(torch.Tensor)
+            condition_losses.append(loss)
+
+        condition_idx = physics.condition_indices
+        for condition_id in torch.unique(condition_idx).tolist():
             condition_name = self._dataloader.condition_names[condition_id]
             condition = self.problem.conditions[condition_name]
-            pts = batch["pts"]
-            # condition name is logged (if logs enabled)
             self.__logged_metric = condition_name
+            pts = batch.physics.input_points
+            input_pts = pts[condition_idx == condition_id]
 
-            if len(batch) == 2:
-                samples = pts[condition_idx == condition_id]
-                loss = self.loss_phys(samples, condition.equation)
-            elif len(batch) == 3:
-                samples = pts[condition_idx == condition_id]
-                ground_truth = batch["output"][condition_idx == condition_id]
-                loss = self.loss_data(samples, ground_truth)
-            else:
-                raise ValueError("Batch size not supported")
+            input_pts.labels = pts.labels
+            loss = 
self.loss_phys(input_pts, condition.equation)
 
             # add condition losses for each epoch
-            condition_losses.append(loss * condition.data_weight)
+            condition_losses.append(loss)
 
         # clamp unknown parameters in InverseProblem (if needed)
         self._clamp_params()
@@ -130,7 +161,7 @@
         total_loss = sum(condition_losses)
         return total_loss.as_subclass(torch.Tensor)
 
-    def loss_data(self, input_tensor, output_tensor):
+    def loss_data(self, input_points, output_points):
         """
         The data loss for the PINN solver. It computes the loss between
         the network output against the true solution. This function
@@ -142,9 +173,9 @@ def loss_data(self, input_tensor, output_tensor):
         :return: The residual loss averaged on the input coordinates
         :rtype: torch.Tensor
         """
-        loss_value = self.loss(self.forward(input_tensor), output_tensor)
+        loss_value = self.loss(self.forward(input_points), output_points)
         self.store_log(loss_value=float(loss_value))
-        return self.loss(self.forward(input_tensor), output_tensor)
+        return loss_value
 
     @abstractmethod
     def loss_phys(self, samples, equation):
@@ -202,6 +233,7 @@ def store_log(self, loss_value):
             prog_bar=True,
             logger=True,
             on_epoch=True,
             on_step=False,
+            batch_size=self._dataloader.batch_size,
         )
         self.__logged_res_losses.append(loss_value)
diff --git a/pina/solvers/pinns/pinn.py b/pina/solvers/pinns/pinn.py
index 15f90818..22bf0c31 100644
--- a/pina/solvers/pinns/pinn.py
+++ b/pina/solvers/pinns/pinn.py
@@ -9,10 +9,8 @@
     _LRScheduler as LRScheduler,
 )  # torch < 2.0
 
-from torch.optim.lr_scheduler import ConstantLR
 
 from .basepinn import PINNInterface
-from pina.utils import check_consistency
 from pina.problem import InverseProblem
@@ -56,16 +54,16 @@ class PINN(PINNInterface):
     DOI: `10.1038 `_.
     """
+    __name__ = 'PINN'
+
     def __init__(
         self,
         problem,
         model,
         extra_features=None,
         loss=torch.nn.MSELoss(),
-        optimizer=torch.optim.Adam,
-        optimizer_kwargs={"lr": 0.001},
-        scheduler=ConstantLR,
-        scheduler_kwargs={"factor": 1, "total_iters": 0},
+        optimizer=None,
+        scheduler=None,
     ):
         """
         :param AbstractProblem problem: The formulation of the problem.
@@ -82,20 +80,15 @@ def __init__(
         :param dict scheduler_kwargs: LR scheduler constructor keyword args.
""" super().__init__( - models=[model], + models=model, problem=problem, - optimizers=[optimizer], - optimizers_kwargs=[optimizer_kwargs], + optimizers=optimizer, + schedulers=scheduler, extra_features=extra_features, loss=loss, ) - # check consistency - check_consistency(scheduler, LRScheduler, subclass=True) - check_consistency(scheduler_kwargs, dict) - # assign variables - self._scheduler = scheduler(self.optimizers[0], **scheduler_kwargs) self._neural_net = self.models[0] def forward(self, x): diff --git a/pina/solvers/solver.py b/pina/solvers/solver.py index e00bc8d5..b622546e 100644 --- a/pina/solvers/solver.py +++ b/pina/solvers/solver.py @@ -138,8 +138,8 @@ def _check_solver_consistency(self, problem): TODO """ for _, condition in problem.conditions.items(): - if not set(self.accepted_condition_types).issubset( - condition.condition_type): + if not set(condition.condition_type).issubset( + set(self.accepted_condition_types)): raise ValueError( - f'{self.__name__} support only dose not support condition ' + f'{self.__name__} dose not support condition ' f'{condition.condition_type}') diff --git a/pina/solvers/supervised.py b/pina/solvers/supervised.py index 62fc9914..b9258f7d 100644 --- a/pina/solvers/supervised.py +++ b/pina/solvers/supervised.py @@ -7,6 +7,7 @@ from ..label_tensor import LabelTensor from ..utils import check_consistency from ..loss.loss_interface import LossInterface +from ..condition import InputOutputPointsCondition class SupervisedSolver(SolverInterface): @@ -37,7 +38,7 @@ class SupervisedSolver(SolverInterface): we are seeking to approximate multiple (discretised) functions given multiple (discretised) input functions. """ - accepted_condition_types = ['supervised'] + accepted_condition_types = [InputOutputPointsCondition.condition_type[0]] __name__ = 'SupervisedSolver' def __init__(self, @@ -115,10 +116,9 @@ def training_step(self, batch, batch_idx): :return: The sum of the loss functions. :rtype: LabelTensor """ - condition_idx = batch.supervised.condition_indices + condition_idx = batch.supervised.condition_indices for condition_id in range(condition_idx.min(), condition_idx.max() + 1): - condition_name = self._dataloader.condition_names[condition_id] condition = self.problem.conditions[condition_name] pts = batch.supervised.input_points diff --git a/pina/trainer.py b/pina/trainer.py index 58c66f67..49461166 100644 --- a/pina/trainer.py +++ b/pina/trainer.py @@ -14,7 +14,7 @@ def __init__(self, batch_size=None, train_size=.7, test_size=.2, - eval_size=.1, + val_size=.1, **kwargs): """ PINA Trainer class for costumizing every aspect of training via flags. 
@@ -39,11 +39,12 @@ def __init__(self, check_consistency(batch_size, int) self.train_size = train_size self.test_size = test_size - self.eval_size = eval_size + self.val_size = val_size self.solver = solver self.batch_size = batch_size self._create_loader() self._move_to_device() + self.data_module = None def _move_to_device(self): device = self._accelerator_connector._parallel_devices[0] @@ -64,7 +65,7 @@ def _create_loader(self): if not self.solver.problem.collector.full: error_message = '\n'.join([ f"""{" " * 13} ---> Condition {key} {"sampled" if value else - "not sampled"}""" for key, value in + "not sampled"}""" for key, value in self._solver.problem.collector._is_conditions_ready.items() ]) raise RuntimeError('Cannot create Trainer if not all conditions ' @@ -77,20 +78,21 @@ def _create_loader(self): device = devices[0] - data_module = PinaDataModule(problem=self.solver.problem, - device=device, - train_size=self.train_size, - test_size=self.test_size, - val_size=self.eval_size) - data_module.setup() - self._loader = data_module.train_dataloader() + self.data_module = PinaDataModule(problem=self.solver.problem, + device=device, + train_size=self.train_size, + test_size=self.test_size, + val_size=self.val_size, + batch_size=self.batch_size, ) + self.data_module.setup() def train(self, **kwargs): """ Train the solver method. """ + self._create_loader() return super().fit(self.solver, - train_dataloaders=self._loader, + datamodule=self.data_module, **kwargs) @property diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 87fd9a15..c8e29cec 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -32,49 +32,49 @@ class Poisson(SpatialProblem): conditions = { 'gamma1': - Condition(domain=CartesianDomain({ - 'x': [0, 1], - 'y': 1 - }), - equation=FixedValue(0.0)), + Condition(domain=CartesianDomain({ + 'x': [0, 1], + 'y': 1 + }), + equation=FixedValue(0.0)), 'gamma2': - Condition(domain=CartesianDomain({ - 'x': [0, 1], - 'y': 0 - }), - equation=FixedValue(0.0)), + Condition(domain=CartesianDomain({ + 'x': [0, 1], + 'y': 0 + }), + equation=FixedValue(0.0)), 'gamma3': - Condition(domain=CartesianDomain({ - 'x': 1, - 'y': [0, 1] - }), - equation=FixedValue(0.0)), + Condition(domain=CartesianDomain({ + 'x': 1, + 'y': [0, 1] + }), + equation=FixedValue(0.0)), 'gamma4': - Condition(domain=CartesianDomain({ - 'x': 0, - 'y': [0, 1] - }), - equation=FixedValue(0.0)), + Condition(domain=CartesianDomain({ + 'x': 0, + 'y': [0, 1] + }), + equation=FixedValue(0.0)), 'D': - Condition(input_points=LabelTensor(torch.rand(size=(100, 2)), - ['x', 'y']), - equation=my_laplace), + Condition(input_points=LabelTensor(torch.rand(size=(100, 2)), + ['x', 'y']), + equation=my_laplace), 'data': - Condition(input_points=in_, output_points=out_), + Condition(input_points=in_, output_points=out_), 'data2': - Condition(input_points=in2_, output_points=out2_), + Condition(input_points=in2_, output_points=out2_), 'unsupervised': - Condition( - input_points=LabelTensor(torch.rand(size=(45, 2)), ['x', 'y']), - conditional_variables=LabelTensor(torch.ones(size=(45, 1)), - ['alpha']), - ), + Condition( + input_points=LabelTensor(torch.rand(size=(45, 2)), ['x', 'y']), + conditional_variables=LabelTensor(torch.ones(size=(45, 1)), + ['alpha']), + ), 'unsupervised2': - Condition( - input_points=LabelTensor(torch.rand(size=(90, 2)), ['x', 'y']), - conditional_variables=LabelTensor(torch.ones(size=(90, 1)), - ['alpha']), - ) + Condition( + input_points=LabelTensor(torch.rand(size=(90, 2)), ['x', 'y']), + 
conditional_variables=LabelTensor(torch.ones(size=(90, 1)), + ['alpha']), + ) } diff --git a/tests/test_label_tensor/test_label_tensor_01.py b/tests/test_label_tensor/test_label_tensor_01.py index 57aafb8c..ea43307c 100644 --- a/tests/test_label_tensor/test_label_tensor_01.py +++ b/tests/test_label_tensor/test_label_tensor_01.py @@ -114,5 +114,5 @@ def test_slice(): assert torch.allclose(tensor_view2, data[3]) tensor_view3 = tensor[:, 2] - assert tensor_view3.labels == labels[2] + assert tensor_view3.labels == [labels[2]] assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1)) diff --git a/tests/test_solvers/test_supervised_solver.py b/tests/test_solvers/test_supervised_solver.py index 8ceadcd9..ebe8179e 100644 --- a/tests/test_solvers/test_supervised_solver.py +++ b/tests/test_solvers/test_supervised_solver.py @@ -121,7 +121,7 @@ def test_train_cpu(): batch_size=5, train_size=1, test_size=0., - eval_size=0.) + val_size=0.) trainer.train() test_train_cpu() diff --git a/tutorials/tutorial5/tutorial.ipynb b/tutorials/tutorial5/tutorial.ipynb index 64032d31..d8e98546 100644 --- a/tutorials/tutorial5/tutorial.ipynb +++ b/tutorials/tutorial5/tutorial.ipynb @@ -22,8 +22,8 @@ "id": "5f2744dc", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:28.837348Z", - "start_time": "2024-09-19T13:35:27.611334Z" + "end_time": "2024-10-31T13:30:49.886194Z", + "start_time": "2024-10-31T13:30:48.624214Z" } }, "source": [ @@ -61,8 +61,8 @@ "id": "2ffb8a4c", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:28.989631Z", - "start_time": "2024-09-19T13:35:28.952744Z" + "end_time": "2024-10-31T13:30:49.920642Z", + "start_time": "2024-10-31T13:30:49.888843Z" } }, "source": [ @@ -70,14 +70,18 @@ "data = io.loadmat(\"Data_Darcy.mat\")\n", "\n", "# extract data (we use only 100 data for train)\n", - "k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), \n", - " labels={3:{'dof': ['u0'], 'name': 'k_train'}})\n", - "u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u'], 'name': 'u_train'}})\n", - "k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u0'], 'name': 'k_test'}})\n", - "u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u'], 'name': 'u_test'}})\n", + "k_train = LabelTensor(\n", + " torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1),\n", + " labels={3: {'dof': ['u0'], 'name': 'k_train'}})\n", + "u_train = LabelTensor(\n", + " torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1),\n", + " labels={3: {'dof': ['u'], 'name': 'u_train'}})\n", + "k_test = LabelTensor(\n", + " torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1),\n", + " labels={3: {'dof': ['u0'], 'name': 'k_test'}})\n", + "u_test = LabelTensor(\n", + " torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1),\n", + " labels={3: {'dof': ['u'], 'name': 'u_test'}})\n", "x = torch.tensor(data['x'], dtype=torch.float)[0]\n", "y = torch.tensor(data['y'], dtype=torch.float)[0]" ], @@ -97,8 +101,8 @@ "id": "c8501b6f", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:29.108381Z", - "start_time": "2024-09-19T13:35:29.031076Z" + "end_time": "2024-10-31T13:30:50.101674Z", + "start_time": "2024-10-31T13:30:50.034859Z" } }, "source": [ @@ -124,32 +128,6 @@ ], "execution_count": 3 }, - { - "cell_type": "code", - "id": "082ab7a8-22e0-498b-b138-158dc9f2658f", - "metadata": { 
- "ExecuteTime": { - "end_time": "2024-09-19T13:35:29.122858Z", - "start_time": "2024-09-19T13:35:29.119985Z" - } - }, - "source": [ - "u_train.labels[3]['dof']" - ], - "outputs": [ - { - "data": { - "text/plain": [ - "['u']" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "execution_count": 4 - }, { "cell_type": "markdown", "id": "89a77ff1", @@ -163,33 +141,23 @@ "id": "8b27d283", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:29.136572Z", - "start_time": "2024-09-19T13:35:29.134124Z" + "end_time": "2024-10-31T13:30:50.191764Z", + "start_time": "2024-10-31T13:30:50.189977Z" } }, "source": [ "class NeuralOperatorSolver(AbstractProblem):\n", - " input_variables = k_train.labels[3]['dof']\n", - " output_variables = u_train.labels[3]['dof']\n", - " domains = {\n", - " 'pts': k_train\n", - " }\n", - " conditions = {'data' : Condition(domain='pts', \n", - " output_points=u_train)}\n", + " input_variables = k_train.labels\n", + " output_variables = u_train.labels\n", + " conditions = {'data': Condition(input_points=k_train,\n", + " output_points=u_train)}\n", + "\n", "\n", "# make problem\n", "problem = NeuralOperatorSolver()" ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "execution_count": 5 + "outputs": [], + "execution_count": 4 }, { "cell_type": "markdown", @@ -206,20 +174,21 @@ "id": "e34f18b0", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:31.245429Z", - "start_time": "2024-09-19T13:35:29.154937Z" + "end_time": "2024-10-31T13:30:52.528635Z", + "start_time": "2024-10-31T13:30:50.225049Z" } }, "source": [ "# make model\n", "model = FeedForward(input_dimensions=1, output_dimensions=1)\n", "\n", - "\n", "# make solver\n", "solver = SupervisedSolver(problem=problem, model=model)\n", "\n", "# make the trainer and train\n", - "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) \n", + "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu',\n", + " enable_model_summary=False, batch_size=10, train_size=1.,\n", + " test_size=0., val_size=0.)\n", "# We train on CPU and avoid model summary at the beginning of training (optional)\n", "trainer.train()" ], @@ -231,14 +200,15 @@ "GPU available: True (mps), used: False\n", "TPU available: False, using: 0 TPU cores\n", "HPU available: False, using: 0 HPUs\n", - "/Users/filippoolivo/miniconda3/envs/PINAv0.2/lib/python3.11/site-packages/pytorch_lightning/trainer/setup.py:177: GPU available but not used. You can set it by doing `Trainer(accelerator='gpu')`.\n" + "/Users/filippoolivo/miniconda3/envs/PINAv0.2-test/lib/python3.11/site-packages/pytorch_lightning/trainer/setup.py:177: GPU available but not used. You can set it by doing `Trainer(accelerator='gpu')`.\n", + "/Users/filippoolivo/miniconda3/envs/PINAv0.2-test/lib/python3.11/site-packages/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py:75: Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `pytorch_lightning` package, due to potential conflicts with other packages in the ML ecosystem. For this reason, `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard` or `tensorboardX` packages are found. 
Please `pip install lightning[extra]` or one of them to enable TensorBoard support by default\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 552.80it/s, v_num=18, mean_loss=0.113]" + "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 459.79it/s, v_num=58, mean_loss=0.108]" ] }, { @@ -252,11 +222,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 547.37it/s, v_num=18, mean_loss=0.113]\n" + "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 456.20it/s, v_num=58, mean_loss=0.108]\n" ] } ], - "execution_count": 6 + "execution_count": 5 }, { "cell_type": "markdown", @@ -271,8 +241,8 @@ "id": "0e2a6aa4", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:31.295336Z", - "start_time": "2024-09-19T13:35:31.256308Z" + "end_time": "2024-10-31T13:30:52.600181Z", + "start_time": "2024-10-31T13:30:52.564456Z" } }, "source": [ @@ -282,10 +252,12 @@ "metric_err = LpLoss(relative=True)\n", "\n", "model = solver.models[0]\n", - "err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100\n", + "err = float(\n", + " metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean()) * 100\n", "print(f'Final error training {err:.2f}%')\n", "\n", - "err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100\n", + "err = float(\n", + " metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean()) * 100\n", "print(f'Final error testing {err:.2f}%')" ], "outputs": [ @@ -294,11 +266,11 @@ "output_type": "stream", "text": [ "Final error training 56.05%\n", - "Final error testing 55.95%\n" + "Final error testing 56.02%\n" ] } ], - "execution_count": 7 + "execution_count": 6 }, { "cell_type": "markdown", @@ -315,8 +287,8 @@ "id": "9af523a5", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:44.717807Z", - "start_time": "2024-09-19T13:35:31.306689Z" + "end_time": "2024-10-31T13:31:06.537460Z", + "start_time": "2024-10-31T13:30:52.694424Z" } }, "source": [ @@ -330,12 +302,14 @@ " inner_size=24,\n", " padding=8)\n", "\n", - "\n", "# make solver\n", "solver = SupervisedSolver(problem=problem, model=model)\n", "\n", "# make the trainer and train\n", - "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)\n", + "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu',\n", + " enable_model_summary=False, batch_size=10, train_size=1.,\n", + " test_size=0.,\n", + " val_size=0.) 
# we train on CPU and avoid model summary at beginning of training (optional)\n", "trainer.train()" ], "outputs": [ @@ -352,7 +326,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 73.04it/s, v_num=19, mean_loss=0.00215]" + "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 72.88it/s, v_num=59, mean_loss=0.00291]" ] }, { @@ -366,11 +340,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 72.84it/s, v_num=19, mean_loss=0.00215]\n" + "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 72.69it/s, v_num=59, mean_loss=0.00291]\n" ] } ], - "execution_count": 8 + "execution_count": 7 }, { "cell_type": "markdown", @@ -385,17 +359,19 @@ "id": "58e2db89", "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:35:45.259819Z", - "start_time": "2024-09-19T13:35:44.729042Z" + "end_time": "2024-10-31T13:31:07.134298Z", + "start_time": "2024-10-31T13:31:06.657198Z" } }, "source": [ "model = solver.models[0]\n", "\n", - "err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100\n", + "err = float(\n", + " metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean()) * 100\n", "print(f'Final error training {err:.2f}%')\n", "\n", - "err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100\n", + "err = float(\n", + " metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean()) * 100\n", "print(f'Final error testing {err:.2f}%')" ], "outputs": [ @@ -403,12 +379,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "Final error training 7.48%\n", - "Final error testing 7.73%\n" + "Final error training 7.21%\n", + "Final error testing 7.39%\n" ] } ], - "execution_count": 9 + "execution_count": 8 }, { "cell_type": "markdown", @@ -431,8 +407,8 @@ { "metadata": { "ExecuteTime": { - "end_time": "2024-09-19T13:08:35.195331Z", - "start_time": "2024-09-19T13:08:35.193830Z" + "end_time": "2024-10-31T13:31:07.231206Z", + "start_time": "2024-10-31T13:31:07.230017Z" } }, "cell_type": "code",