Commit cac4f71
chore(nalp): Only adversarial models remain without annotated typing.
gugarosa committed Apr 22, 2022 (1 parent: cc0f353)
Showing 18 changed files with 479 additions and 326 deletions.
29 changes: 18 additions & 11 deletions nalp/models/discriminators/conv.py
@@ -1,6 +1,8 @@
 """Convolutional discriminator.
 """

+from typing import Optional
+
 import tensorflow as tf
 from tensorflow.keras.layers import Conv2D, Dense, Dropout

@@ -16,13 +18,18 @@ class ConvDiscriminator(Discriminator):
     """

-    def __init__(self, n_samplings=3, alpha=0.3, dropout_rate=0.3):
+    def __init__(
+        self,
+        n_samplings: Optional[int] = 3,
+        alpha: Optional[float] = 0.3,
+        dropout_rate: Optional[float] = 0.3,
+    ) -> None:
         """Initialization method.

         Args:
-            n_samplings (int): Number of downsamplings to perform.
-            alpha (float): LeakyReLU activation threshold.
-            dropout_rate (float): Dropout activation rate.
+            n_samplings: Number of downsamplings to perform.
+            alpha: LeakyReLU activation threshold.
+            dropout_rate: Dropout activation rate.

         """

@@ -52,24 +59,24 @@ def __init__(self, n_samplings=3, alpha=0.3, dropout_rate=0.3):
         logger.info("Class overrided.")

     @property
-    def alpha(self):
-        """float: LeakyReLU activation threshold."""
+    def alpha(self) -> float:
+        """LeakyReLU activation threshold."""

         return self._alpha

     @alpha.setter
-    def alpha(self, alpha):
+    def alpha(self, alpha: float) -> None:
         self._alpha = alpha

-    def call(self, x, training=True):
+    def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
-            training (bool): Whether architecture is under training or not.
+            x: A tensorflow's tensor holding input data.
+            training: Whether architecture is under training or not.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
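A minimal usage sketch of the newly annotated class follows. The import path and the 4-D input shape are assumptions (the diff shows only nalp/models/discriminators/conv.py, not the package exports or the layer stack); the annotations themselves are hints for type checkers, not runtime checks.

import tensorflow as tf

from nalp.models.discriminators import ConvDiscriminator  # assumed export path

# Defaults mirror the annotated signature: n_samplings=3, alpha=0.3, dropout_rate=0.3.
disc = ConvDiscriminator(n_samplings=3, alpha=0.3, dropout_rate=0.3)

# The typed property/setter pair still behaves like a plain attribute.
disc.alpha = 0.2

# Assumption: a Conv2D-based discriminator consumes a 4-D batch
# of shape (batch, height, width, channels).
x = tf.random.normal((4, 28, 28, 1))
y = disc(x, training=True)  # dispatches to call(x, training=True)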
36 changes: 19 additions & 17 deletions nalp/models/discriminators/embedded_text.py
@@ -1,6 +1,8 @@
 """Embedded-text discriminator.
 """

+from typing import Optional, Tuple
+
 import tensorflow as tf
 from tensorflow.keras.layers import Conv2D, Dense, Dropout, Embedding, MaxPool1D

@@ -18,22 +20,22 @@ class EmbeddedTextDiscriminator(Discriminator):

     def __init__(
         self,
-        vocab_size=1,
-        max_length=1,
-        embedding_size=32,
-        n_filters=(64),
-        filters_size=(1),
-        dropout_rate=0.25,
-    ):
+        vocab_size: Optional[int] = 1,
+        max_length: Optional[int] = 1,
+        embedding_size: Optional[int] = 32,
+        n_filters: Optional[Tuple[int, ...]] = (64),
+        filters_size: Optional[Tuple[int, ...]] = (1),
+        dropout_rate: Optional[float] = 0.25,
+    ) -> None:
         """Initialization method.

         Args:
-            vocab_size (int): The size of the vocabulary.
-            max_length (int): Maximum length of the sequences.
-            embedding_size (int): The size of the embedding layer.
-            n_filters (tuple): Number of filters to be applied.
-            filters_size (tuple): Size of filters to be applied.
-            dropout_rate (float): Dropout activation rate.
+            vocab_size: The size of the vocabulary.
+            max_length: Maximum length of the sequences.
+            embedding_size: The size of the embedding layer.
+            n_filters: Number of filters to be applied.
+            filters_size: Size of filters to be applied.
+            dropout_rate: Dropout activation rate.

         """

@@ -72,15 +74,15 @@ def __init__(
         logger.info("Class overrided.")

-    def call(self, x, training=True):
+    def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
-            training (bool): Whether architecture is under training or not.
+            x: A tensorflow's tensor holding input data.
+            training: Whether architecture is under training or not.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
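One detail worth flagging in this hunk: the inherited defaults (64) and (1) are not tuples, since parentheses without a trailing comma are plain grouping, so they do not actually satisfy the new Tuple[int, ...] annotation. A standalone check:

# Parentheses alone do not build a tuple; the trailing comma does.
n_filters = (64)
assert isinstance(n_filters, int) and n_filters == 64  # (64) is just the int 64

n_filters_tuple = (64,)
assert isinstance(n_filters_tuple, tuple)  # (64,) is a one-element tuple

# Callers who want the annotation to hold can pass tuples explicitly, e.g.
# EmbeddedTextDiscriminator(n_filters=(64,), filters_size=(1,)).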
24 changes: 14 additions & 10 deletions nalp/models/discriminators/linear.py
@@ -1,6 +1,8 @@
 """Linear discriminator.
 """

+from typing import Optional
+
 import tensorflow as tf
 from tensorflow.keras.layers import Dense

@@ -16,12 +18,14 @@ class LinearDiscriminator(Discriminator):
     """

-    def __init__(self, n_samplings=3, alpha=0.01):
+    def __init__(
+        self, n_samplings: Optional[int] = 3, alpha: Optional[float] = 0.01
+    ) -> None:
         """Initialization method.

         Args:
-            n_samplings (int): Number of downsamplings to perform.
-            alpha (float): LeakyReLU activation threshold.
+            n_samplings: Number of downsamplings to perform.
+            alpha: LeakyReLU activation threshold.

         """

@@ -43,24 +47,24 @@ def __init__(self, n_samplings=3, alpha=0.01):
         logger.info("Class overrided.")

     @property
-    def alpha(self):
-        """float: LeakyReLU activation threshold."""
+    def alpha(self) -> float:
+        """LeakyReLU activation threshold."""

         return self._alpha

     @alpha.setter
-    def alpha(self, alpha):
+    def alpha(self, alpha: float) -> None:
         self._alpha = alpha

-    def call(self, x, training=True):
+    def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
-            training (bool): Whether architecture is under training or not.
+            x: A tensorflow's tensor holding input data.
+            training: Whether architecture is under training or not.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
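A hedged aside on the annotation style used throughout this commit: under PEP 484, Optional[int] means "int or None", not "parameter with a default". None is never handled by these constructors, so a stricter spelling would drop Optional; both forms are sketched below for contrast.

from typing import Optional


def as_committed(n_samplings: Optional[int] = 3, alpha: Optional[float] = 0.01) -> None:
    """Mirrors the commit's style; a type checker accepts n_samplings=None here."""


def stricter(n_samplings: int = 3, alpha: float = 0.01) -> None:
    """Same defaults, but a type checker rejects None arguments."""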
17 changes: 11 additions & 6 deletions nalp/models/discriminators/lstm.py
@@ -1,6 +1,9 @@
 """Long Short-Term Memory discriminator.
 """

+from typing import Optional
+
 import tensorflow as tf
 from tensorflow.keras.layers import RNN, Dense, LSTMCell
+
 from nalp.core import Discriminator

@@ -18,12 +21,14 @@ class LSTMDiscriminator(Discriminator):
     """

-    def __init__(self, embedding_size=32, hidden_size=64):
+    def __init__(
+        self, embedding_size: Optional[int] = 32, hidden_size: Optional[int] = 64
+    ) -> None:
         """Initialization method.

         Args:
-            embedding_size (int): The size of the embedding layer.
-            hidden_size (int): The amount of hidden neurons.
+            embedding_size: The size of the embedding layer.
+            hidden_size: The amount of hidden neurons.

         """

@@ -47,14 +52,14 @@ def __init__(self, embedding_size=32, hidden_size=64):
         logger.info("Class overrided.")

-    def call(self, x):
+    def call(self, x: tf.Tensor) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
+            x: A tensorflow's tensor holding input data.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
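A sketch of exercising the retyped call signature. The layer stack is not shown in this hunk; since only RNN, Dense, and LSTMCell are imported, the input is assumed here to be an already-embedded batch of sequences, which may not match the actual model.

import tensorflow as tf

from nalp.models.discriminators import LSTMDiscriminator  # assumed export path

disc = LSTMDiscriminator(embedding_size=32, hidden_size=64)

# Assumption: input of shape (batch, timesteps, embedding_size).
x = tf.random.normal((8, 10, 32))
y = disc(x)  # call(x: tf.Tensor) -> tf.Tensor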
32 changes: 17 additions & 15 deletions nalp/models/discriminators/text.py
@@ -1,6 +1,8 @@
 """Text-based discriminator.
 """

+from typing import Optional, Tuple
+
 import tensorflow as tf
 from tensorflow.keras.layers import Conv2D, Dense, Dropout, MaxPool1D

@@ -18,20 +20,20 @@ class TextDiscriminator(Discriminator):

     def __init__(
         self,
-        max_length=1,
-        embedding_size=32,
-        n_filters=(64),
-        filters_size=(1),
-        dropout_rate=0.25,
-    ):
+        max_length: Optional[int] = 1,
+        embedding_size: Optional[int] = 32,
+        n_filters: Optional[Tuple[int, ...]] = (64),
+        filters_size: Optional[Tuple[int, ...]] = (1),
+        dropout_rate: Optional[float] = 0.25,
+    ) -> None:
         """Initialization method.

         Args:
-            max_length (int): Maximum length of the sequences.
-            embedding_size (int): The size of the embedding layer.
-            n_filters (tuple): Number of filters to be applied.
-            filters_size (tuple): Size of filters to be applied.
-            dropout_rate (float): Dropout activation rate.
+            max_length: Maximum length of the sequences.
+            embedding_size: The size of the embedding layer.
+            n_filters: Number of filters to be applied.
+            filters_size: Size of filters to be applied.
+            dropout_rate: Dropout activation rate.

         """

@@ -67,15 +69,15 @@ def __init__(
         logger.info("Class overrided.")

-    def call(self, x, training=True):
+    def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
-            training (bool): Whether architecture is under training or not.
+            x: A tensorflow's tensor holding input data.
+            training: Whether architecture is under training or not.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
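The training flag on call matters because this class stacks Dropout layers: in Keras, dropout only perturbs activations when training=True. A self-contained illustration of that semantics (plain Keras, not NALP code):

import tensorflow as tf

dropout = tf.keras.layers.Dropout(rate=0.25)
x = tf.ones((1, 8))

# training=False: the layer is an identity map.
infer_out = dropout(x, training=False)
assert bool(tf.reduce_all(infer_out == x))

# training=True: random units are zeroed and the survivors are
# rescaled by 1 / (1 - rate), so outputs differ from the input.
train_out = dropout(x, training=True)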
31 changes: 20 additions & 11 deletions nalp/models/generators/bi_lstm.py
@@ -1,10 +1,13 @@
 """Bi-directional Long Short-Term Memory generator.
 """

+from typing import Optional
+
 import tensorflow as tf
 from tensorflow.keras.layers import RNN, Dense, Embedding, LSTMCell

 from nalp.core import Generator
+from nalp.encoders.integer import IntegerEncoder
 from nalp.utils import logging

 logger = logging.get_logger(__name__)

@@ -19,14 +22,20 @@ class BiLSTMGenerator(Generator):
     """

-    def __init__(self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=64):
+    def __init__(
+        self,
+        encoder: Optional[IntegerEncoder] = None,
+        vocab_size: Optional[int] = 1,
+        embedding_size: Optional[int] = 32,
+        hidden_size: Optional[int] = 64,
+    ) -> None:
         """Initialization method.

         Args:
-            encoder (IntegerEncoder): An index to vocabulary encoder.
-            vocab_size (int): The size of the vocabulary.
-            embedding_size (int): The size of the embedding layer.
-            hidden_size (int): The amount of hidden neurons.
+            encoder: An index to vocabulary encoder.
+            vocab_size: The size of the vocabulary.
+            embedding_size: The size of the embedding layer.
+            hidden_size: The amount of hidden neurons.

         """

@@ -66,23 +75,23 @@ def __init__(self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=64
         logger.info("Class overrided.")

     @property
-    def encoder(self):
-        """obj: An encoder generic object."""
+    def encoder(self) -> IntegerEncoder:
+        """An encoder generic object."""

         return self._encoder

     @encoder.setter
-    def encoder(self, encoder):
+    def encoder(self, encoder: IntegerEncoder) -> None:
         self._encoder = encoder

-    def call(self, x):
+    def call(self, x: tf.Tensor) -> tf.Tensor:
         """Method that holds vital information whenever this class is called.

         Args:
-            x (tf.tensor): A tensorflow's tensor holding input data.
+            x: A tensorflow's tensor holding input data.

         Returns:
-            The same tensor after passing through each defined layer.
+            (tf.Tensor): The same tensor after passing through each defined layer.

         """
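The encoder property is now typed as IntegerEncoder rather than a generic object, and the hunk shows the matching import. A sketch of wiring an encoder in through either the constructor or the typed setter; the generator's export path and the no-argument IntegerEncoder construction are assumptions.

from nalp.encoders.integer import IntegerEncoder  # import path shown in the diff
from nalp.models.generators import BiLSTMGenerator  # assumed export path

encoder = IntegerEncoder()

# Passed up front through the annotated constructor...
gen = BiLSTMGenerator(encoder=encoder, vocab_size=100, embedding_size=32, hidden_size=64)

# ...or attached later through the typed property/setter pair.
gen.encoder = encoder
assert isinstance(gen.encoder, IntegerEncoder)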