
Commit

fix(root): Fixes usage of Optional[] type hint.
gugarosa committed Oct 14, 2023
1 parent 403c309 commit fc652b6
Showing 33 changed files with 197 additions and 206 deletions.
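
The change is the same throughout: Optional[...] is dropped from parameters whose defaults are ordinary values, because under PEP 484 Optional[X] means Union[X, None] ("this value may be None"), not "this argument has a default and may be omitted". A minimal sketch of the distinction, using hypothetical function names rather than repository code:

from typing import Optional

# Before: Optional[int] claims that None is an acceptable value, which is
# not the intent when the default is the integer 1.
def build_vocab_old(min_frequency: Optional[int] = 1) -> None:
    ...

# After: the parameter is always an int; the default alone makes it
# optional to pass at the call site.
def build_vocab_new(min_frequency: int = 1) -> None:
    ...

# Optional[] remains the right hint when None genuinely is a valid value:
def truncate(max_length: Optional[int] = None) -> None:
    ...
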
4 changes: 2 additions & 2 deletions nalp/core/corpus.py
@@ -2,7 +2,7 @@
"""

from collections import Counter
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List

import nalp.utils.constants as c
import nalp.utils.preprocess as p
@@ -21,7 +21,7 @@ class Corpus:
"""

-def __init__(self, min_frequency: Optional[int] = 1) -> None:
+def __init__(self, min_frequency: int = 1) -> None:
"""Initialization method."""

self.min_frequency = min_frequency
4 changes: 1 addition & 3 deletions nalp/core/dataset.py
@@ -1,8 +1,6 @@
"""Dataset-related class.
"""

-from typing import Optional
-
import tensorflow as tf

import nalp.utils.constants as c
@@ -14,7 +12,7 @@ class Dataset:
"""

-def __init__(self, shuffle: Optional[bool] = True) -> None:
+def __init__(self, shuffle: bool = True) -> None:
"""Initialization method.
Args:
26 changes: 13 additions & 13 deletions nalp/core/model.py
@@ -1,7 +1,7 @@
"""Model-related classes.
"""

-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List

import tensorflow as tf
from tensorflow.keras import Model
@@ -20,7 +20,7 @@ class Discriminator(Model):
"""

-def __init__(self, name: Optional[str] = "") -> None:
+def __init__(self, name: str = "") -> None:
"""Initialization method.
Note that basic variables shared by all childs should be declared here, e.g., layers.
@@ -32,7 +32,7 @@ def __init__(self, name: Optional[str] = "") -> None:

super(Discriminator, self).__init__(name=name)

-def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None:
+def call(self, x: tf.Tensor, training: bool = True) -> None:
"""Method that holds vital information whenever this class is called.
Note that you will need to implement this method directly on its child. Essentially,
@@ -56,7 +56,7 @@ class Generator(Model):
"""

-def __init__(self, name: Optional[str] = "") -> None:
+def __init__(self, name: str = "") -> None:
"""Initialization method.
Note that basic variables shared by all childs should be declared here, e.g., layers.
@@ -68,7 +68,7 @@ def __init__(self, name: Optional[str] = "") -> None:

super(Generator, self).__init__(name=name)

-def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None:
+def call(self, x: tf.Tensor, training: bool = True) -> None:
"""Method that holds vital information whenever this class is called.
Note that you will need to implement this method directly on its child. Essentially,
@@ -86,7 +86,7 @@ def call(self, x: tf.Tensor, training: Optional[bool] = True) -> None:
raise NotImplementedError

def generate_greedy_search(
-self, start: str, max_length: Optional[int] = 100
+self, start: str, max_length: int = 100
) -> List[str]:
"""Generates text by using greedy search, where the sampled
token is always sampled according to the maximum probability.
@@ -125,8 +125,8 @@ def generate_greedy_search(
def generate_temperature_sampling(
self,
start: str,
-max_length: Optional[int] = 100,
-temperature: Optional[float] = 1.0,
+max_length: int = 100,
+temperature: float = 1.0,
) -> List[str]:
"""Generates text by using temperature sampling, where the sampled
token is sampled according to a multinomial/categorical distribution.
@@ -168,9 +168,9 @@ def generate_temperature_sampling(
def generate_top_sampling(
self,
start: str,
-max_length: Optional[int] = 100,
-k: Optional[int] = 0,
-p: Optional[float] = 0.0,
+max_length: int = 100,
+k: int = 0,
+p: float = 0.0,
) -> List[str]:
"""Generates text by using top-k and top-p sampling, where the sampled
token is sampled according to the `k` most likely words distribution, as well
@@ -240,7 +240,7 @@ def __init__(
self,
discriminator: Discriminator,
generator: Generator,
-name: Optional[str] = "",
+name: str = "",
) -> None:
"""Initialization method.
@@ -371,7 +371,7 @@ def step(self, x: tf.Tensor) -> None:
self.G_loss.update_state(G_loss)
self.D_loss.update_state(D_loss)

-def fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
+def fit(self, batches: Dataset, epochs: int = 100) -> None:
"""Trains the model.
Args:
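
With the updated hints, the Generator sampling helpers read as plain keyword defaults. A hypothetical usage sketch (the generator instance and start string are placeholders; only the method names and parameters come from the hunks above):

tokens = generator.generate_greedy_search(start="hello", max_length=100)
tokens = generator.generate_temperature_sampling(start="hello", max_length=100, temperature=0.5)
tokens = generator.generate_top_sampling(start="hello", max_length=100, k=10, p=0.9)
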
4 changes: 1 addition & 3 deletions nalp/corpus/audio.py
@@ -1,8 +1,6 @@
"""Audio-related corpus.
"""

-from typing import Optional
-
from nalp.core import Corpus
from nalp.utils import loader, logging

@@ -17,7 +15,7 @@ class AudioCorpus(Corpus):
"""

-def __init__(self, from_file: str, min_frequency: Optional[int] = 1) -> None:
+def __init__(self, from_file: str, min_frequency: int = 1) -> None:
"""Initialization method.
Args:
8 changes: 4 additions & 4 deletions nalp/corpus/sentence.py
@@ -24,10 +24,10 @@ def __init__(
self,
tokens: Optional[List[str]] = None,
from_file: Optional[str] = None,
-corpus_type: Optional[str] = "char",
-min_frequency: Optional[int] = 1,
-max_pad_length: Optional[int] = None,
-sos_eos_tokens: Optional[bool] = True,
+corpus_type: str = "char",
+min_frequency: int = 1,
+max_pad_length: int = None,
+sos_eos_tokens: bool = True,
) -> None:
"""Initialization method.
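
Note that the parameters which genuinely accept None (tokens and from_file) keep their Optional hint, while the value-defaulted ones lose it. One related convention: under PEP 484, a parameter whose default is None, such as max_pad_length here, is normally still annotated Optional[int], since modern type checkers no longer infer an implicit Optional from a None default. A hypothetical sketch of that convention (not repository code):

from typing import Optional

def pad_sequences(max_pad_length: Optional[int] = None) -> None:
    # A None default means "no limit", so the annotation should admit None.
    if max_pad_length is not None:
        ...  # truncate or pad to max_pad_length
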
4 changes: 2 additions & 2 deletions nalp/corpus/text.py
@@ -21,8 +21,8 @@ def __init__(
self,
tokens: Optional[List[str]] = None,
from_file: Optional[str] = None,
-corpus_type: Optional[str] = "char",
-min_frequency: Optional[int] = 1,
+corpus_type: str = "char",
+min_frequency: int = 1,
) -> None:
"""Initialization method.
6 changes: 3 additions & 3 deletions nalp/datasets/image.py
@@ -21,10 +21,10 @@ class ImageDataset(Dataset):
def __init__(
self,
images: np.array,
-batch_size: Optional[int] = 256,
+batch_size: int = 256,
shape: Optional[Tuple[int, int]] = None,
-normalize: Optional[bool] = True,
-shuffle: Optional[bool] = True,
+normalize: bool = True,
+shuffle: bool = True,
) -> None:
"""Initialization method.
8 changes: 4 additions & 4 deletions nalp/datasets/language_modeling.py
@@ -1,7 +1,7 @@
"""Language modeling dataset class.
"""

-from typing import Optional, Tuple
+from typing import Tuple

import numpy as np
import tensorflow as tf
@@ -21,9 +21,9 @@ class LanguageModelingDataset(Dataset):
def __init__(
self,
encoded_tokens: np.array,
-max_contiguous_pad_length: Optional[int] = 1,
-batch_size: Optional[int] = 64,
-shuffle: Optional[bool] = True,
+max_contiguous_pad_length: int = 1,
+batch_size: int = 64,
+shuffle: bool = True,
) -> None:
"""Initialization method.
14 changes: 7 additions & 7 deletions nalp/encoders/word2vec.py
@@ -2,7 +2,7 @@
"""

import multiprocessing
-from typing import List, Optional
+from typing import List

import numpy as np
from gensim.models.word2vec import Word2Vec as W2V
@@ -31,12 +31,12 @@ def __init__(self) -> None:
def learn(
self,
tokens: List[str],
-max_features: Optional[int] = 128,
-window_size: Optional[int] = 5,
-min_count: Optional[int] = 1,
-algorithm: Optional[bool] = 0,
-learning_rate: Optional[float] = 0.01,
-iterations: Optional[int] = 1000,
+max_features: int = 128,
+window_size: int = 5,
+min_count: int = 1,
+algorithm: int = 0,
+learning_rate: float = 0.01,
+iterations: int = 1000,
):
"""Learns a Word2Vec representation based on the its methodology.
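
A hypothetical call with the updated signature (the import path and class name are assumed from the file location; only learn() and its parameters appear in the hunks above, and algorithm presumably maps onto gensim's sg flag, where 0 selects CBOW and 1 selects skip-gram):

from nalp.encoders.word2vec import Word2Vec  # assumed import path

encoder = Word2Vec()
encoder.learn(
    tokens=["a", "small", "list", "of", "tokens"],
    max_features=128,
    window_size=5,
    min_count=1,
    algorithm=0,
    learning_rate=0.01,
    iterations=1000,
)
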
12 changes: 6 additions & 6 deletions nalp/models/dcgan.py
@@ -1,7 +1,7 @@
"""Deep Convolutional Generative Adversarial Network.
"""

-from typing import Optional, Tuple
+from typing import Tuple

from nalp.core import Adversarial
from nalp.models.discriminators import ConvDiscriminator
@@ -23,11 +23,11 @@ class DCGAN(Adversarial):

def __init__(
self,
-input_shape: Optional[Tuple[int, int, int]] = (28, 28, 1),
-noise_dim: Optional[int] = 100,
-n_samplings: Optional[int] = 3,
-alpha: Optional[float] = 0.3,
-dropout_rate: Optional[float] = 0.3,
+input_shape: Tuple[int, int, int] = (28, 28, 1),
+noise_dim: int = 100,
+n_samplings: int = 3,
+alpha: float = 0.3,
+dropout_rate: float = 0.3,
) -> None:
"""Initialization method.
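
A hypothetical instantiation using the defaults shown above (the import path is assumed from the file location; the argument values are the defaults from the diff):

from nalp.models.dcgan import DCGAN  # assumed import path

dcgan = DCGAN(
    input_shape=(28, 28, 1),
    noise_dim=100,
    n_samplings=3,
    alpha=0.3,
    dropout_rate=0.3,
)
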
10 changes: 4 additions & 6 deletions nalp/models/discriminators/conv.py
@@ -1,8 +1,6 @@
"""Convolutional discriminator.
"""

-from typing import Optional
-
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, Dropout

@@ -20,9 +18,9 @@ class ConvDiscriminator(Discriminator):

def __init__(
self,
-n_samplings: Optional[int] = 3,
-alpha: Optional[float] = 0.3,
-dropout_rate: Optional[float] = 0.3,
+n_samplings: int = 3,
+alpha: float = 0.3,
+dropout_rate: float = 0.3,
) -> None:
"""Initialization method.
@@ -64,7 +62,7 @@ def alpha(self) -> float:
def alpha(self, alpha: float) -> None:
self._alpha = alpha

-def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
+def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor:
"""Method that holds vital information whenever this class is called.
Args:
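
The training flag that call() now types as a plain bool follows the usual Keras convention: it is forwarded to layers whose behavior differs between training and inference, such as Dropout. A minimal sketch of that pattern, independent of this repository:

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout


class TinyDiscriminator(Model):
    def __init__(self) -> None:
        super().__init__()

        self.dense = Dense(64, activation="relu")
        self.drop = Dropout(0.3)
        self.out = Dense(1)

    def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor:
        x = self.dense(x)

        # Dropout only zeroes activations when training=True; at inference
        # time (training=False) it acts as the identity.
        x = self.drop(x, training=training)

        return self.out(x)
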
16 changes: 8 additions & 8 deletions nalp/models/discriminators/embedded_text.py
@@ -1,7 +1,7 @@
"""Embedded-text discriminator.
"""

-from typing import Optional, Tuple
+from typing import Tuple

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Embedding, MaxPool1D
@@ -20,12 +20,12 @@ class EmbeddedTextDiscriminator(Discriminator):

def __init__(
self,
-vocab_size: Optional[int] = 1,
-max_length: Optional[int] = 1,
-embedding_size: Optional[int] = 32,
-n_filters: Optional[Tuple[int, ...]] = (64),
-filters_size: Optional[Tuple[int, ...]] = (1),
-dropout_rate: Optional[float] = 0.25,
+vocab_size: int = 1,
+max_length: int = 1,
+embedding_size: int = 32,
+n_filters: Tuple[int, ...] = (64),
+filters_size: Tuple[int, ...] = (1),
+dropout_rate: float = 0.25,
) -> None:
"""Initialization method.
@@ -68,7 +68,7 @@ def __init__(

logger.info("Class overrided.")

-def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
+def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor:
"""Method that holds vital information whenever this class is called.
Args:
6 changes: 2 additions & 4 deletions nalp/models/discriminators/linear.py
@@ -1,8 +1,6 @@
"""Linear discriminator.
"""

-from typing import Optional
-
import tensorflow as tf
from tensorflow.keras.layers import Dense

@@ -19,7 +17,7 @@ class LinearDiscriminator(Discriminator):
"""

def __init__(
-self, n_samplings: Optional[int] = 3, alpha: Optional[float] = 0.01
+self, n_samplings: int = 3, alpha: float = 0.01
) -> None:
"""Initialization method.
@@ -53,7 +51,7 @@ def alpha(self) -> float:
def alpha(self, alpha: float) -> None:
self._alpha = alpha

-def call(self, x: tf.Tensor, training: Optional[bool] = True) -> tf.Tensor:
+def call(self, x: tf.Tensor, training: bool = True) -> tf.Tensor:
"""Method that holds vital information whenever this class is called.
Args:
4 changes: 1 addition & 3 deletions nalp/models/discriminators/lstm.py
@@ -1,8 +1,6 @@
"""Long Short-Term Memory discriminator.
"""

-from typing import Optional
-
import tensorflow as tf
from tensorflow.keras.layers import RNN, Dense, LSTMCell

@@ -22,7 +20,7 @@ class LSTMDiscriminator(Discriminator):
"""

def __init__(
-self, embedding_size: Optional[int] = 32, hidden_size: Optional[int] = 64
+self, embedding_size: int = 32, hidden_size: int = 64
) -> None:
"""Initialization method.
(The remaining changed files are not shown.)
