[DEFAULTS]
# name of the run
name = stable_audio_tools
# name of the project
project = None
# the batch size
batch_size = 4
# If `true`, attempts to resume training from the latest checkpoint.
# In this case, each run must have a unique config filename.
recover = false
# Save top K model checkpoints during training.
save_top_k = -1
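# (following PyTorch Lightning's ModelCheckpoint convention, -1 keeps every checkpoint)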
# number of nodes to use for training
num_nodes = 1
# Multi-GPU strategy for PyTorch Lightning
strategy = "auto"
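# ("auto" lets Lightning choose; other Lightning strategy strings such as
# "ddp" or "deepspeed" should also be accepted here)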
# Precision to use for training
precision = "16-mixed"
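# (presumably passed straight to the Lightning Trainer; alternatives
# include "bf16-mixed" and "32-true")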
# number of CPU workers for the DataLoader
num_workers = 6
# the random seed
seed = 42
# Number of batches to accumulate gradients over
accum_batches = 1
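# (effective batch size is roughly batch_size * accum_batches * total number of GPUs)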
# Number of steps between checkpoints
checkpoint_every = 10000
# Number of steps between validation runs
val_every = -1
# trainer checkpoint file to restart training from
ckpt_path = ''
# model checkpoint file to start a new training run from
pretrained_ckpt_path = ''
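# (unlike ckpt_path above, this presumably loads model weights only,
# without restoring trainer/optimizer state)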
# Checkpoint path for the pretransform model if needed
pretransform_ckpt_path = ''
# configuration file specifying model hyperparameters
model_config = ''
# configuration file for the training dataset
dataset_config = ''
# configuration file for the validation dataset
val_dataset_config = ''
# directory to save the checkpoints in
save_dir = ''
# gradient_clip_val passed into PyTorch Lightning Trainer
gradient_clip_val = 0.0
# remove the weight norm from the pretransform model
remove_pretransform_weight_norm = ''
# Logger type to use
logger = 'wandb'
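
# ---------------------------------------------------------------------
# Usage sketch (an assumption, not part of this file's contract): the
# repo's train.py reads these defaults and lets any key above be
# overridden on the command line with the matching --flag, e.g.:
#
#   python3 ./train.py \
#       --dataset-config /path/to/dataset/config.json \
#       --model-config /path/to/model/config.json \
#       --name my_run \
#       --save-dir /path/to/checkpoints \
#       --batch-size 8
#
# Values set in this file act as fallbacks when no flag is given.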