diff --git a/nalp/core/corpus.py b/nalp/core/corpus.py index b27cbb5..3fae433 100644 --- a/nalp/core/corpus.py +++ b/nalp/core/corpus.py @@ -2,7 +2,7 @@ """ from collections import Counter -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import nalp.utils.constants as c import nalp.utils.preprocess as p @@ -21,7 +21,7 @@ class Corpus: """ - def __init__(self, min_frequency: Optional[int] = 1) -> None: + def __init__(self, min_frequency: int = 1) -> None: """Initialization method.""" self.min_frequency = min_frequency diff --git a/nalp/core/dataset.py b/nalp/core/dataset.py index fd18a0f..38e33c1 100644 --- a/nalp/core/dataset.py +++ b/nalp/core/dataset.py @@ -1,8 +1,6 @@ """Dataset-related class. """ -from typing import Optional - import tensorflow as tf import nalp.utils.constants as c @@ -14,7 +12,7 @@ class Dataset: """ - def __init__(self, shuffle: Optional[bool] = True) -> None: + def __init__(self, shuffle: bool = True) -> None: """Initialization method. Args: diff --git a/nalp/core/model.py b/nalp/core/model.py index 0b03525..b541704 100644 --- a/nalp/core/model.py +++ b/nalp/core/model.py @@ -1,7 +1,7 @@ """Model-related classes. """ -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import tensorflow as tf from tensorflow.keras import Model @@ -20,7 +20,7 @@ class Discriminator(Model): """ - def __init__(self, name: Optional[str] = "") -> None: + def __init__(self, name: str = "") -> None: """Initialization method. Note that basic variables shared by all childs should be declared here, e.g., layers. @@ -32,7 +32,7 @@ def __init__(self, name: Optional[str] = "") -> None: super(Discriminator, self).__init__(name=name) - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None: + def call(self, x: tf.Tensor, training: bool = True) -> None: """Method that holds vital information whenever this class is called. Note that you will need to implement this method directly on its child. Essentially, @@ -56,7 +56,7 @@ class Generator(Model): """ - def __init__(self, name: Optional[str] = "") -> None: + def __init__(self, name: str = "") -> None: """Initialization method. Note that basic variables shared by all childs should be declared here, e.g., layers. @@ -68,7 +68,7 @@ def __init__(self, name: Optional[str] = "") -> None: super(Generator, self).__init__(name=name) - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None: + def call(self, x: tf.Tensor, training: bool = True) -> None: """Method that holds vital information whenever this class is called. Note that you will need to implement this method directly on its child. Essentially, @@ -86,7 +86,7 @@ def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None: raise NotImplementedError def generate_greedy_search( - self, start: str, max_length: Optional[int] = 100 + self, start: str, max_length: int = 100 ) -> List[str]: """Generates text by using greedy search, where the sampled token is always sampled according to the maximum probability. @@ -125,8 +125,8 @@ def generate_greedy_search( def generate_temperature_sampling( self, start: str, - max_length: Optional[int] = 100, - temperature: Optional[float] = 1.0, + max_length: int = 100, + temperature: float = 1.0, ) -> List[str]: """Generates text by using temperature sampling, where the sampled token is sampled according to a multinomial/categorical distribution. 
@@ -168,9 +168,9 @@ def generate_temperature_sampling(
     def generate_top_sampling(
         self,
         start: str,
-        max_length: Optional[int] = 100,
-        k: Optional[int] = 0,
-        p: Optional[float] = 0.0,
+        max_length: int = 100,
+        k: int = 0,
+        p: float = 0.0,
     ) -> List[str]:
         """Generates text by using top-k and top-p sampling, where the sampled
         token is sampled according to the `k` most likely words distribution, as well
@@ -240,7 +240,7 @@ def __init__(
         self,
         discriminator: Discriminator,
         generator: Generator,
-        name: Optional[str] = "",
+        name: str = "",
     ) -> None:
         """Initialization method.

@@ -371,7 +371,7 @@ def step(self, x: tf.Tensor) -> None:
         self.G_loss.update_state(G_loss)
         self.D_loss.update_state(D_loss)

-    def fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
+    def fit(self, batches: Dataset, epochs: int = 100) -> None:
         """Trains the model.

         Args:
diff --git a/nalp/corpus/audio.py b/nalp/corpus/audio.py
index e0bad47..42f98fd 100644
--- a/nalp/corpus/audio.py
+++ b/nalp/corpus/audio.py
@@ -1,8 +1,6 @@
 """Audio-related corpus.
 """

-from typing import Optional
-
 from nalp.core import Corpus
 from nalp.utils import loader, logging

@@ -17,7 +15,7 @@ class AudioCorpus(Corpus):

     """

-    def __init__(self, from_file: str, min_frequency: Optional[int] = 1) -> None:
+    def __init__(self, from_file: str, min_frequency: int = 1) -> None:
         """Initialization method.

         Args:
diff --git a/nalp/corpus/sentence.py b/nalp/corpus/sentence.py
index 5812996..459cb9a 100644
--- a/nalp/corpus/sentence.py
+++ b/nalp/corpus/sentence.py
@@ -24,10 +24,10 @@ def __init__(
         self,
         tokens: Optional[List[str]] = None,
         from_file: Optional[str] = None,
-        corpus_type: Optional[str] = "char",
-        min_frequency: Optional[int] = 1,
+        corpus_type: str = "char",
+        min_frequency: int = 1,
         max_pad_length: Optional[int] = None,
-        sos_eos_tokens: Optional[bool] = True,
+        sos_eos_tokens: bool = True,
     ) -> None:
         """Initialization method.

diff --git a/nalp/corpus/text.py b/nalp/corpus/text.py
index 6f8cc9b..3573930 100644
--- a/nalp/corpus/text.py
+++ b/nalp/corpus/text.py
@@ -21,8 +21,8 @@ def __init__(
         self,
         tokens: Optional[List[str]] = None,
         from_file: Optional[str] = None,
-        corpus_type: Optional[str] = "char",
-        min_frequency: Optional[int] = 1,
+        corpus_type: str = "char",
+        min_frequency: int = 1,
     ) -> None:
         """Initialization method.

diff --git a/nalp/datasets/image.py b/nalp/datasets/image.py
index 83a65fb..d0260c6 100644
--- a/nalp/datasets/image.py
+++ b/nalp/datasets/image.py
@@ -21,10 +21,10 @@ class ImageDataset(Dataset):
     def __init__(
         self,
         images: np.array,
-        batch_size: Optional[int] = 256,
+        batch_size: int = 256,
         shape: Optional[Tuple[int, int]] = None,
-        normalize: Optional[bool] = True,
-        shuffle: Optional[bool] = True,
+        normalize: bool = True,
+        shuffle: bool = True,
     ) -> None:
         """Initialization method.

diff --git a/nalp/datasets/language_modeling.py b/nalp/datasets/language_modeling.py
index d5daa98..0f0325f 100644
--- a/nalp/datasets/language_modeling.py
+++ b/nalp/datasets/language_modeling.py
@@ -1,7 +1,7 @@
 """Language modeling dataset class.
""" -from typing import Optional, Tuple +from typing import Tuple import numpy as np import tensorflow as tf @@ -21,9 +21,9 @@ class LanguageModelingDataset(Dataset): def __init__( self, encoded_tokens: np.array, - max_contiguous_pad_length: Optional[int] = 1, - batch_size: Optional[int] = 64, - shuffle: Optional[bool] = True, + max_contiguous_pad_length: int = 1, + batch_size: int = 64, + shuffle: bool = True, ) -> None: """Initialization method. diff --git a/nalp/encoders/word2vec.py b/nalp/encoders/word2vec.py index d4e7fdd..af57d25 100644 --- a/nalp/encoders/word2vec.py +++ b/nalp/encoders/word2vec.py @@ -2,7 +2,7 @@ """ import multiprocessing -from typing import List, Optional +from typing import List import numpy as np from gensim.models.word2vec import Word2Vec as W2V @@ -31,12 +31,12 @@ def __init__(self) -> None: def learn( self, tokens: List[str], - max_features: Optional[int] = 128, - window_size: Optional[int] = 5, - min_count: Optional[int] = 1, - algorithm: Optional[bool] = 0, - learning_rate: Optional[float] = 0.01, - iterations: Optional[int] = 1000, + max_features: int = 128, + window_size: int = 5, + min_count: int = 1, + algorithm: int = 0, + learning_rate: float = 0.01, + iterations: int = 1000, ): """Learns a Word2Vec representation based on the its methodology. diff --git a/nalp/models/dcgan.py b/nalp/models/dcgan.py index f001783..75d5faf 100644 --- a/nalp/models/dcgan.py +++ b/nalp/models/dcgan.py @@ -1,7 +1,7 @@ """Deep Convolutional Generative Adversarial Network. """ -from typing import Optional, Tuple +from typing import Tuple from nalp.core import Adversarial from nalp.models.discriminators import ConvDiscriminator @@ -23,11 +23,11 @@ class DCGAN(Adversarial): def __init__( self, - input_shape: Optional[Tuple[int, int, int]] = (28, 28, 1), - noise_dim: Optional[int] = 100, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.3, - dropout_rate: Optional[float] = 0.3, + input_shape: Tuple[int, int, int] = (28, 28, 1), + noise_dim: int = 100, + n_samplings: int = 3, + alpha: float = 0.3, + dropout_rate: float = 0.3, ) -> None: """Initialization method. diff --git a/nalp/models/discriminators/conv.py b/nalp/models/discriminators/conv.py index e1a35b0..55d0fb4 100644 --- a/nalp/models/discriminators/conv.py +++ b/nalp/models/discriminators/conv.py @@ -1,8 +1,6 @@ """Convolutional discriminator. """ -from typing import Optional - import tensorflow as tf from tensorflow.keras.layers import Conv2D, Dense, Dropout @@ -20,9 +18,9 @@ class ConvDiscriminator(Discriminator): def __init__( self, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.3, - dropout_rate: Optional[float] = 0.3, + n_samplings: int = 3, + alpha: float = 0.3, + dropout_rate: float = 0.3, ) -> None: """Initialization method. @@ -64,7 +62,7 @@ def alpha(self) -> float: def alpha(self, alpha: float) -> None: self._alpha = alpha - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/discriminators/embedded_text.py b/nalp/models/discriminators/embedded_text.py index ce605d9..907749f 100644 --- a/nalp/models/discriminators/embedded_text.py +++ b/nalp/models/discriminators/embedded_text.py @@ -1,7 +1,7 @@ """Embedded-text discriminator. 
""" -from typing import Optional, Tuple +from typing import Tuple import tensorflow as tf from tensorflow.keras.layers import Conv2D, Dense, Dropout, Embedding, MaxPool1D @@ -20,12 +20,12 @@ class EmbeddedTextDiscriminator(Discriminator): def __init__( self, - vocab_size: Optional[int] = 1, - max_length: Optional[int] = 1, - embedding_size: Optional[int] = 32, - n_filters: Optional[Tuple[int, ...]] = (64), - filters_size: Optional[Tuple[int, ...]] = (1), - dropout_rate: Optional[float] = 0.25, + vocab_size: int = 1, + max_length: int = 1, + embedding_size: int = 32, + n_filters: Tuple[int, ...] = (64), + filters_size: Tuple[int, ...] = (1), + dropout_rate: float = 0.25, ) -> None: """Initialization method. @@ -68,7 +68,7 @@ def __init__( logger.info("Class overrided.") - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/discriminators/linear.py b/nalp/models/discriminators/linear.py index 1573873..1b415ad 100644 --- a/nalp/models/discriminators/linear.py +++ b/nalp/models/discriminators/linear.py @@ -1,8 +1,6 @@ """Linear discriminator. """ -from typing import Optional - import tensorflow as tf from tensorflow.keras.layers import Dense @@ -19,7 +17,7 @@ class LinearDiscriminator(Discriminator): """ def __init__( - self, n_samplings: Optional[int] = 3, alpha: Optional[float] = 0.01 + self, n_samplings: int = 3, alpha: float = 0.01 ) -> None: """Initialization method. @@ -53,7 +51,7 @@ def alpha(self) -> float: def alpha(self, alpha: float) -> None: self._alpha = alpha - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/discriminators/lstm.py b/nalp/models/discriminators/lstm.py index e55e703..8a38fb9 100644 --- a/nalp/models/discriminators/lstm.py +++ b/nalp/models/discriminators/lstm.py @@ -1,8 +1,6 @@ """Long Short-Term Memory discriminator. """ -from typing import Optional - import tensorflow as tf from tensorflow.keras.layers import RNN, Dense, LSTMCell @@ -22,7 +20,7 @@ class LSTMDiscriminator(Discriminator): """ def __init__( - self, embedding_size: Optional[int] = 32, hidden_size: Optional[int] = 64 + self, embedding_size: int = 32, hidden_size: int = 64 ) -> None: """Initialization method. diff --git a/nalp/models/discriminators/text.py b/nalp/models/discriminators/text.py index 0d531f1..d763403 100644 --- a/nalp/models/discriminators/text.py +++ b/nalp/models/discriminators/text.py @@ -1,7 +1,7 @@ """Text-based discriminator. """ -from typing import Optional, Tuple +from typing import Tuple import tensorflow as tf from tensorflow.keras.layers import Conv2D, Dense, Dropout, MaxPool1D @@ -20,11 +20,11 @@ class TextDiscriminator(Discriminator): def __init__( self, - max_length: Optional[int] = 1, - embedding_size: Optional[int] = 32, - n_filters: Optional[Tuple[int, ...]] = (64), - filters_size: Optional[Tuple[int, ...]] = (1), - dropout_rate: Optional[float] = 0.25, + max_length: int = 1, + embedding_size: int = 32, + n_filters: Tuple[int, ...] = (64), + filters_size: Tuple[int, ...] = (1), + dropout_rate: float = 0.25, ) -> None: """Initialization method. 
@@ -64,7 +64,7 @@ def __init__( logger.info("Class overrided.") - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/gan.py b/nalp/models/gan.py index 32601b6..96704d5 100644 --- a/nalp/models/gan.py +++ b/nalp/models/gan.py @@ -1,7 +1,7 @@ """Generative Adversarial Network. """ -from typing import Optional, Tuple +from typing import Tuple from nalp.core import Adversarial from nalp.models.discriminators import LinearDiscriminator @@ -22,10 +22,10 @@ class GAN(Adversarial): def __init__( self, - input_shape: Optional[Tuple[int, ...]] = (784,), - noise_dim: Optional[int] = 100, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.01, + input_shape: Tuple[int, ...] = (784,), + noise_dim: int = 100, + n_samplings: int = 3, + alpha: float = 0.01, ) -> None: """Initialization method. diff --git a/nalp/models/generators/bi_lstm.py b/nalp/models/generators/bi_lstm.py index 66b79d7..80848e1 100644 --- a/nalp/models/generators/bi_lstm.py +++ b/nalp/models/generators/bi_lstm.py @@ -25,9 +25,9 @@ class BiLSTMGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, ) -> None: """Initialization method. diff --git a/nalp/models/generators/conv.py b/nalp/models/generators/conv.py index 2df61ae..1735c30 100644 --- a/nalp/models/generators/conv.py +++ b/nalp/models/generators/conv.py @@ -1,7 +1,7 @@ """Convolutional generator. """ -from typing import Optional, Tuple +from typing import Tuple import tensorflow as tf from tensorflow.keras.layers import BatchNormalization, Conv2DTranspose, Dense @@ -20,10 +20,10 @@ class ConvGenerator(Generator): def __init__( self, - input_shape: Optional[Tuple[int, int, int]] = (28, 28, 1), - noise_dim: Optional[int] = 100, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.3, + input_shape: Tuple[int, int, int] = (28, 28, 1), + noise_dim: int = 100, + n_samplings: int = 3, + alpha: float = 0.3, ) -> None: """Initialization method. @@ -134,7 +134,7 @@ def filter_size(self) -> int: def filter_size(self, filter_size: int) -> None: self._filter_size = filter_size - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/generators/gru.py b/nalp/models/generators/gru.py index 942d80e..3d8d6f5 100644 --- a/nalp/models/generators/gru.py +++ b/nalp/models/generators/gru.py @@ -27,9 +27,9 @@ class GRUGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, ) -> None: """Initialization method. 
diff --git a/nalp/models/generators/gumbel_lstm.py b/nalp/models/generators/gumbel_lstm.py index 9ba711d..32be859 100644 --- a/nalp/models/generators/gumbel_lstm.py +++ b/nalp/models/generators/gumbel_lstm.py @@ -23,10 +23,10 @@ class GumbelLSTMGenerator(LSTMGenerator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, - tau: Optional[float] = 5.0, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, + tau: float = 5.0, ) -> None: """Initialization method. @@ -81,7 +81,7 @@ def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: return x, x_g, y_g def generate_greedy_search( - self, start: str, max_length: Optional[int] = 100 + self, start: str, max_length: int = 100 ) -> List[str]: """Generates text by using greedy search, where the sampled token is always sampled according to the maximum probability. @@ -120,8 +120,8 @@ def generate_greedy_search( def generate_temperature_sampling( self, start: str, - max_length: Optional[int] = 100, - temperature: Optional[float] = 1.0, + max_length: int = 100, + temperature: float = 1.0, ): """Generates text by using temperature sampling, where the sampled token is sampled according to a multinomial/categorical distribution. @@ -165,9 +165,9 @@ def generate_temperature_sampling( def generate_top_sampling( self, start: str, - max_length: Optional[int] = 100, - k: Optional[int] = 0, - p: Optional[float] = 0.0, + max_length: int = 100, + k: int = 0, + p: float = 0.0, ): """Generates text by using top-k and top-p sampling, where the sampled token is sampled according to the `k` most likely words distribution, as well diff --git a/nalp/models/generators/gumbel_rmc.py b/nalp/models/generators/gumbel_rmc.py index 21e6e53..6b06721 100644 --- a/nalp/models/generators/gumbel_rmc.py +++ b/nalp/models/generators/gumbel_rmc.py @@ -23,14 +23,14 @@ class GumbelRMCGenerator(RMCGenerator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - n_slots: Optional[int] = 3, - n_heads: Optional[int] = 5, - head_size: Optional[int] = 10, - n_blocks: Optional[int] = 1, - n_layers: Optional[int] = 3, - tau: Optional[float] = 5, + vocab_size: int = 1, + embedding_size: int = 32, + n_slots: int = 3, + n_heads: int = 5, + head_size: int = 10, + n_blocks: int = 1, + n_layers: int = 3, + tau: float = 5, ): """Initialization method. @@ -96,7 +96,7 @@ def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]: return x, x_g, y_g def generate_greedy_search( - self, start: str, max_length: Optional[int] = 100 + self, start: str, max_length: int = 100 ) -> List[str]: """Generates text by using greedy search, where the sampled token is always sampled according to the maximum probability. @@ -135,8 +135,8 @@ def generate_greedy_search( def generate_temperature_sampling( self, start: str, - max_length: Optional[int] = 100, - temperature: Optional[float] = 1.0, + max_length: int = 100, + temperature: float = 1.0, ): """Generates text by using temperature sampling, where the sampled token is sampled according to a multinomial/categorical distribution. 
@@ -180,9 +180,9 @@ def generate_temperature_sampling( def generate_top_sampling( self, start: str, - max_length: Optional[int] = 100, - k: Optional[int] = 0, - p: Optional[float] = 0.0, + max_length: int = 100, + k: int = 0, + p: float = 0.0, ): """Generates text by using top-k and top-p sampling, where the sampled token is sampled according to the `k` most likely words distribution, as well diff --git a/nalp/models/generators/linear.py b/nalp/models/generators/linear.py index cedc372..2b80bbe 100644 --- a/nalp/models/generators/linear.py +++ b/nalp/models/generators/linear.py @@ -1,7 +1,7 @@ """Linear generator. """ -from typing import Optional, Tuple +from typing import Tuple import tensorflow as tf from tensorflow.keras.layers import Dense @@ -20,10 +20,10 @@ class LinearGenerator(Generator): def __init__( self, - input_shape: Optional[Tuple[int, ...]] = (784,), - noise_dim: Optional[int] = 100, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.01, + input_shape: Tuple[int, ...] = (784,), + noise_dim: int = 100, + n_samplings: int = 3, + alpha: float = 0.01, ) -> None: """Initialization method. @@ -70,7 +70,7 @@ def noise_dim(self) -> int: def noise_dim(self, noise_dim: int) -> None: self._noise_dim = noise_dim - def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor: + def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor: """Method that holds vital information whenever this class is called. Args: diff --git a/nalp/models/generators/lstm.py b/nalp/models/generators/lstm.py index c2ae3c9..0ec9b80 100644 --- a/nalp/models/generators/lstm.py +++ b/nalp/models/generators/lstm.py @@ -25,9 +25,9 @@ class LSTMGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, ) -> None: """Initialization method. diff --git a/nalp/models/generators/rmc.py b/nalp/models/generators/rmc.py index a0414cc..19f3b3d 100644 --- a/nalp/models/generators/rmc.py +++ b/nalp/models/generators/rmc.py @@ -1,5 +1,6 @@ """Relational Memory Core generator. """ + from typing import Optional import tensorflow as tf @@ -26,13 +27,13 @@ class RMCGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - n_slots: Optional[int] = 3, - n_heads: Optional[int] = 5, - head_size: Optional[int] = 10, - n_blocks: Optional[int] = 1, - n_layers: Optional[int] = 3, + vocab_size: int = 1, + embedding_size: int = 32, + n_slots: int = 3, + n_heads: int = 5, + head_size: int = 10, + n_blocks: int = 1, + n_layers: int = 3, ) -> None: """Initialization method. diff --git a/nalp/models/generators/rnn.py b/nalp/models/generators/rnn.py index b5dd85d..a33c2cc 100644 --- a/nalp/models/generators/rnn.py +++ b/nalp/models/generators/rnn.py @@ -25,9 +25,9 @@ class RNNGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, ) -> None: """Initialization method. 
diff --git a/nalp/models/generators/stacked_rnn.py b/nalp/models/generators/stacked_rnn.py index 643bfe6..2122849 100644 --- a/nalp/models/generators/stacked_rnn.py +++ b/nalp/models/generators/stacked_rnn.py @@ -25,9 +25,9 @@ class StackedRNNGenerator(Generator): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[Tuple[int, ...]] = (64, 64), + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: Tuple[int, ...] = (64, 64), ) -> None: """Initialization method. diff --git a/nalp/models/gsgan.py b/nalp/models/gsgan.py index 3b53c3e..1521d83 100644 --- a/nalp/models/gsgan.py +++ b/nalp/models/gsgan.py @@ -30,10 +30,10 @@ class GSGAN(Adversarial): def __init__( self, encoder: Optional[IntegerEncoder] = None, - vocab_size: Optional[int] = 1, - embedding_size: Optional[int] = 32, - hidden_size: Optional[int] = 64, - tau: Optional[float] = 5, + vocab_size: int = 1, + embedding_size: int = 32, + hidden_size: int = 64, + tau: float = 5, ) -> None: """Initialization method. @@ -226,7 +226,7 @@ def step(self, x: tf.Tensor, y: tf.Tensor) -> None: self.D_loss.update_state(D_loss) self.G_loss.update_state(G_loss) - def pre_fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None: + def pre_fit(self, batches: Dataset, epochs: int = 100) -> None: """Pre-trains the model. Args: @@ -255,7 +255,7 @@ def pre_fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None: logger.to_file("Loss(G): %s", self.G_loss.result().numpy()) - def fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None: + def fit(self, batches: Dataset, epochs: int = 100) -> None: """Trains the model. Args: diff --git a/nalp/models/layers/gumbel_softmax.py b/nalp/models/layers/gumbel_softmax.py index 7eb71a5..e844df9 100644 --- a/nalp/models/layers/gumbel_softmax.py +++ b/nalp/models/layers/gumbel_softmax.py @@ -1,7 +1,7 @@ """Gumbel-Softmax layer. """ -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict, Tuple import tensorflow as tf from tensorflow.keras.layers import Layer @@ -37,7 +37,7 @@ class GumbelSoftmax(Layer): """ - def __init__(self, axis: Optional[int] = -1, **kwargs) -> None: + def __init__(self, axis: int = -1, **kwargs) -> None: """Initialization method. 
        Args:
diff --git a/nalp/models/layers/relational_memory_cell.py b/nalp/models/layers/relational_memory_cell.py
index f19fe20..e788b6a 100644
--- a/nalp/models/layers/relational_memory_cell.py
+++ b/nalp/models/layers/relational_memory_cell.py
@@ -27,14 +27,14 @@ def __init__(
         n_slots: int,
         n_heads: int,
         head_size: int,
-        n_blocks: Optional[int] = 1,
-        n_layers: Optional[int] = 3,
-        activation: Optional[str] = "tanh",
-        recurrent_activation: Optional[str] = "hard_sigmoid",
-        forget_bias: Optional[float] = 1.0,
-        kernel_initializer: Optional[str] = "glorot_uniform",
-        recurrent_initializer: Optional[str] = "orthogonal",
-        bias_initializer: Optional[str] = "zeros",
+        n_blocks: int = 1,
+        n_layers: int = 3,
+        activation: str = "tanh",
+        recurrent_activation: str = "hard_sigmoid",
+        forget_bias: float = 1.0,
+        kernel_initializer: str = "glorot_uniform",
+        recurrent_initializer: str = "orthogonal",
+        bias_initializer: str = "zeros",
         kernel_regularizer: Optional[str] = None,
         recurrent_regularizer: Optional[str] = None,
         bias_regularizer: Optional[str] = None,
diff --git a/nalp/models/maligan.py b/nalp/models/maligan.py
index dcf0f96..b60d606 100644
--- a/nalp/models/maligan.py
+++ b/nalp/models/maligan.py
@@ -31,14 +31,14 @@ class MaliGAN(Adversarial):
     def __init__(
         self,
         encoder: Optional[IntegerEncoder] = None,
-        vocab_size: Optional[int] = 1,
-        max_length: Optional[int] = 1,
-        embedding_size: Optional[int] = 32,
-        hidden_size: Optional[int] = 64,
-        n_filters: Optional[Tuple[int, ...]] = (64),
-        filters_size: Optional[Tuple[int, ...]] = (1),
-        dropout_rate: Optional[float] = 0.25,
-        temperature: Optional[float] = 1.0,
+        vocab_size: int = 1,
+        max_length: int = 1,
+        embedding_size: int = 32,
+        hidden_size: int = 64,
+        n_filters: Tuple[int, ...] = (64,),
+        filters_size: Tuple[int, ...] = (1,),
+        dropout_rate: float = 0.25,
+        temperature: float = 1.0,
     ) -> None:
         """Initialization method.

@@ -124,7 +124,7 @@ def compile(
         self.history["G_loss"] = []

     def generate_batch(
-        self, batch_size: Optional[int] = 1, length: Optional[int] = 1
+        self, batch_size: int = 1, length: int = 1
     ) -> tf.Tensor:
         """Generates a batch of tokens by feeding to the network the current
         token (t) and predicting the next token (t+1).

@@ -246,8 +246,8 @@ def D_step(self, x: tf.Tensor, y: tf.Tensor) -> None:
     def pre_fit(
         self,
         batches: Dataset,
-        g_epochs: Optional[int] = 50,
-        d_epochs: Optional[int] = 10,
+        g_epochs: int = 50,
+        d_epochs: int = 10,
     ) -> None:
         """Pre-trains the model.

@@ -318,7 +318,7 @@ def pre_fit(

         logger.to_file("Loss(D): %s", self.D_loss.result().numpy())

     def fit(
-        self, batches: Dataset, epochs: Optional[int] = 10, d_epochs: Optional[int] = 5
+        self, batches: Dataset, epochs: int = 10, d_epochs: int = 5
     ) -> None:
         """Trains the model.
diff --git a/nalp/models/relgan.py b/nalp/models/relgan.py
index a89a4b7..f6b4f4c 100644
--- a/nalp/models/relgan.py
+++ b/nalp/models/relgan.py
@@ -28,18 +28,18 @@ class RelGAN(Adversarial):
     def __init__(
         self,
         encoder: Optional[IntegerEncoder] = None,
-        vocab_size: Optional[int] = 1,
-        max_length: Optional[int] = 1,
-        embedding_size: Optional[int] = 32,
-        n_slots: Optional[int] = 3,
-        n_heads: Optional[int] = 5,
-        head_size: Optional[int] = 10,
-        n_blocks: Optional[int] = 1,
-        n_layers: Optional[int] = 3,
-        n_filters: Optional[Tuple[int, ...]] = (64),
-        filters_size: Optional[Tuple[int, ...]] = (1),
-        dropout_rate: Optional[float] = 0.25,
-        tau: Optional[float] = 5.0,
+        vocab_size: int = 1,
+        max_length: int = 1,
+        embedding_size: int = 32,
+        n_slots: int = 3,
+        n_heads: int = 5,
+        head_size: int = 10,
+        n_blocks: int = 1,
+        n_layers: int = 3,
+        n_filters: Tuple[int, ...] = (64,),
+        filters_size: Tuple[int, ...] = (1,),
+        dropout_rate: float = 0.25,
+        tau: float = 5.0,
     ):
         """Initialization method.

@@ -252,7 +252,7 @@ def step(self, x: tf.Tensor, y: tf.Tensor) -> None:
         self.G_loss.update_state(G_loss)
         self.D_loss.update_state(D_loss)

-    def pre_fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
+    def pre_fit(self, batches: Dataset, epochs: int = 100) -> None:
         """Pre-trains the model.

         Args:
@@ -281,7 +281,7 @@ def pre_fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:

         logger.to_file("Loss(G): %s", self.G_loss.result().numpy())

-    def fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
+    def fit(self, batches: Dataset, epochs: int = 100) -> None:
         """Trains the model.

         Args:
diff --git a/nalp/models/seqgan.py b/nalp/models/seqgan.py
index 4e32b6e..3d216d5 100644
--- a/nalp/models/seqgan.py
+++ b/nalp/models/seqgan.py
@@ -30,14 +30,14 @@ class SeqGAN(Adversarial):
     def __init__(
         self,
         encoder: Optional[IntegerEncoder] = None,
-        vocab_size: Optional[int] = 1,
-        max_length: Optional[int] = 1,
-        embedding_size: Optional[int] = 32,
-        hidden_size: Optional[int] = 64,
-        n_filters: Optional[Tuple[int, ...]] = (64),
-        filters_size: Optional[Tuple[int, ...]] = (1),
-        dropout_rate: Optional[float] = 0.25,
-        temperature: Optional[float] = 1.0,
+        vocab_size: int = 1,
+        max_length: int = 1,
+        embedding_size: int = 32,
+        hidden_size: int = 64,
+        n_filters: Tuple[int, ...] = (64,),
+        filters_size: Tuple[int, ...] = (1,),
+        dropout_rate: float = 0.25,
+        temperature: float = 1.0,
     ) -> None:
         """Initialization method.

@@ -122,7 +122,7 @@ def compile(
         self.history["G_loss"] = []

     def generate_batch(
-        self, batch_size: Optional[int] = 1, length: Optional[int] = 1
+        self, batch_size: int = 1, length: int = 1
     ) -> tf.Tensor:
         """Generates a batch of tokens by feeding to the network the current
         token (t) and predicting the next token (t+1).

@@ -262,8 +262,8 @@ def D_step(self, x: tf.Tensor, y: tf.Tensor) -> None:
     def pre_fit(
         self,
         batches: Dataset,
-        g_epochs: Optional[int] = 50,
-        d_epochs: Optional[int] = 10,
+        g_epochs: int = 50,
+        d_epochs: int = 10,
     ) -> None:
         """Pre-trains the model.

@@ -336,10 +336,10 @@ def pre_fit(
     def fit(
         self,
         batches: Dataset,
-        epochs: Optional[int] = 10,
-        g_epochs: Optional[int] = 1,
-        d_epochs: Optional[int] = 5,
-        n_rollouts: Optional[int] = 16,
+        epochs: int = 10,
+        g_epochs: int = 1,
+        d_epochs: int = 5,
+        n_rollouts: int = 16,
     ) -> None:
         """Trains the model.
diff --git a/nalp/models/wgan.py b/nalp/models/wgan.py index de71a43..6ed4925 100644 --- a/nalp/models/wgan.py +++ b/nalp/models/wgan.py @@ -1,7 +1,7 @@ """Wasserstein Generative Adversarial Network. """ -from typing import Optional, Tuple +from typing import Tuple import tensorflow as tf from tensorflow.keras.utils import Progbar @@ -32,14 +32,14 @@ class WGAN(Adversarial): def __init__( self, - input_shape: Optional[Tuple[int, int, int]] = (28, 28, 1), - noise_dim: Optional[int] = 100, - n_samplings: Optional[int] = 3, - alpha: Optional[float] = 0.3, - dropout_rate: Optional[float] = 0.3, - model_type: Optional[str] = "wc", - clip: Optional[float] = 0.01, - penalty: Optional[int] = 10, + input_shape: Tuple[int, int, int] = (28, 28, 1), + noise_dim: int = 100, + n_samplings: int = 3, + alpha: float = 0.3, + dropout_rate: float = 0.3, + model_type: str = "wc", + clip: float = 0.01, + penalty: int = 10, ): """Initialization method. @@ -202,8 +202,8 @@ def G_step(self, x: tf.Tensor) -> None: def fit( self, batches: Dataset, - epochs: Optional[int] = 100, - critic_steps: Optional[int] = 5, + epochs: int = 100, + critic_steps: int = 5, ) -> None: """Trains the model.
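
The rationale for the sweep above, as a minimal sketch: PEP 484 defines `Optional[X]` as shorthand for `Union[X, None]`, i.e. "may be None", not "has a default value". Parameters with non-None defaults therefore drop the wrapper, while parameters whose default is None (such as `max_pad_length` in nalp/corpus/sentence.py) keep it. The function names below are illustrative only, not part of nalp:

from typing import Optional

def build_vocabulary(min_frequency: int = 1) -> None:
    # Has a default but never accepts None, so the plain `int` annotation is correct.
    ...

def pad_sequences(max_pad_length: Optional[int] = None) -> None:
    # None is a meaningful value here ("do not pad"), so Optional[int] stays.
    ...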
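A related pitfall the tightened annotations make visible: parentheses alone do not create a tuple, so a default written as `(64)` is the int 64, which contradicts a `Tuple[int, ...]` annotation and would break any iteration over the filter sizes. A one-element tuple needs the trailing comma, as the corrected `(64,)` and `(1,)` defaults above use. A quick check:

# (64) is just a parenthesized int; (64,) is a one-element tuple.
assert isinstance((64), int)
assert isinstance((64,), tuple)
assert len((64,)) == 1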
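For readers skimming the `generate_*` signatures touched above, here is a self-contained sketch of what the `temperature` and `k` parameters do to a logits vector before sampling. This is a generic illustration of the technique, not nalp's exact implementation:

import numpy as np

def sample_token(logits: np.ndarray, temperature: float = 1.0, k: int = 0) -> int:
    # Temperatures above 1.0 flatten the distribution; below 1.0 sharpen it.
    scaled = logits / temperature
    if k > 0:
        # Top-k filtering: mask everything below the k-th largest logit.
        cutoff = np.sort(scaled)[-k]
        scaled = np.where(scaled >= cutoff, scaled, -np.inf)
    probs = np.exp(scaled - scaled.max())  # numerically stable softmax
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))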