train_relativerotation.py
from utils.torch_util import freeze

# Caps on how many samples are consumed per training epoch and per validation pass.
MAX_TRAIN_SAMPLES_PER_EPOCH = 2000
MAX_VAL_SAMPLES = 200
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--device_id', type=int, default=1)
parser.add_argument('hyper_config', help="Hyperparams config python file.")
parser.add_argument('ldd_config', help='Loss and diag dict config python file')
parser.add_argument('train_dset_path', type=str)
parser.add_argument('val_dset_path', type=str)
# parser.add_argument('loss_str', type=str, help="Loss str describing what loss fnx to use")
parser.add_argument('--resume_pkl', type=str, help="Checkpoint to resume from")
parser.add_argument('--load_optim', action='store_true')
parser.add_argument('--lr', type=float, default=0.0003)
# parser.add_argument('--kld_weight', type=float, default=.00001)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--checkpoint_freq', type=int, default=10)
parser.add_argument('--evaluation_freq', type=int, default=1, help="Evaluate every n epochs")
parser.add_argument('--augment_mesh_freq', type=int, default=100)
parser.add_argument('--max_partitions', type=int, default=20, help="Max number of batches to train each epoch")
parser.add_argument('--max_evaluation_episodes', type=int, default=10)
parser.add_argument('--enable_scheduler', action='store_true')
parser.add_argument('--num_warmup_steps', type=int, default=500, help='Number of steps to warmup with the LR scheduler')
parser.add_argument('--num_training_steps', type=int, default=2000, help='Number of steps before the LR converges to 0, '
                                                                         'after warming up')
parser.add_argument('--clip', type=float, default=1., help='Max norm for gradient clipping')
parser.add_argument('--seed', type=int, default=0, help='Torch and Numpy random seed')
parser.add_argument('--test_model_load_reproducibility', action='store_true', help="Serialize and load model and check that "
"evaluation performance is the same")
parser.add_argument('--num_points', type=int, default=150, help="Num points for FPS sampling")
parser.add_argument('--num_fps_samples', type=int, default=1, help="Num samples for FPS sampling")
parser.add_argument('--resume_initconfig', type=str, help="Initconfig to resume from")
parser.add_argument('--rot_mag_bound', type=float, help="How much to bound the rotation magnitude", default=2*3.14159)
def none_or_str(value):
    """Allow the literal string 'None' on the command line to mean None."""
    if value == 'None':
        return None
    return value
parser.add_argument('--rot_aug', default="xyz", type=none_or_str)
parser.add_argument('--scale_aug', default="xyz", help="What type of scale aug to use")
parser.add_argument('--shear_aug', type=str, default="xy")
parser.add_argument('--log_gradients', action='store_true', help="Log gradients")
parser.add_argument('--max_z_scale', type=float, default=2.0)
parser.add_argument('--min_z_scale', type=float, default=0.5)
parser.add_argument('--max_shear', type=float, default=0.5)
parser.add_argument('--detect_anomaly', action='store_true')
parser.add_argument('--freeze_threshold', type=float, default=0.05)
parser.add_argument('--freeze_encoder', action='store_true', help="Freeze encoder if reconstruction error falls below "
                                                                  "the freeze threshold")
parser.add_argument('--freeze_decoder', action='store_true', help="Freeze decoder")
parser.add_argument('--gpu0', type=int, default=0, help="First GPU ID for multi-GPU models")
parser.add_argument('--gpu1', type=int, default=1, help="Second GPU ID for multi-GPU models")
parser.add_argument('--stats_json', type=str, help='Used for normalization')
parser.add_argument('--center_fps_pc', action='store_true', help='Center the FPS-sampled point cloud')
# store_false: passing --no_linear_search sets this to False; linear search is on by default.
parser.add_argument('--no_linear_search', action='store_false', help='Use linear search for the plane label generation')
parser.add_argument('--threshold_frac', type=float, default=.02, help='Fraction of points to use for plane label generation')
parser.add_argument('--max_frac_threshold', type=float, default=.1)
parser.add_argument('--dont_make_btb', action='store_true')
parser.add_argument('--randomize_z_canonical', action='store_true')
parser.add_argument('--further_downsample_frac', type=float, default=None)
parser.add_argument('--depth_scale', type=float, default=1.0, help="Dataset depth noise scale (passed through as depth_noise_scale)")
parser.add_argument('--augment_extrinsics', action='store_true', help="Dataset augment extrinsics")
parser.add_argument('--extrinsics_noise_scale', type=float, default=1.0, help="Dataset extrinsics noise scale")
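# Example invocation (the config and dataset paths below are illustrative placeholders):
#   python train_relativerotation.py configs/hyper.py configs/ldd.py \
#       /path/to/train_dset /path/to/val_dset --batch_size 8 --lr 3e-4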
args = parser.parse_args()
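# Seed torch, numpy, and Python's RNG immediately after parsing so that all
# downstream randomness (shuffling, augmentation) is reproducible via --seed.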
import torch
torch.random.manual_seed(args.seed)
import numpy
numpy.random.seed(args.seed)
import random
random.seed(args.seed)
from utils import train_util, misc_util, loss_util
# from supervised_training.utils.misc_util import *
# from supervised_training.utils.loss_util import *
import wandb
import subprocess
# from supervised_training.optim.hugging_face_optimization import get_linear_schedule_with_warmup
import json
from torch.utils.data import DataLoader
import os
from data_generation.sim_dataset import PybulletPointcloudDataset
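# Synchronous CUDA kernel launches make device-side errors surface at the
# offending call; note this only reliably takes effect if set before CUDA initializes.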
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode(("utf-8")).split("\n")[0]
if args.detect_anomaly:
torch.autograd.set_detect_anomaly(True)
wandb.init(project="bandu_v1_full_clean", tags=[])
# wandb.config['git_id'] = git_hash
wandb.config['run_dir'] = wandb.run.dir
# save the following config files to wandb
wandb.save(args.hyper_config)
wandb.save(args.ldd_config)
wandb.save(args.stats_json)
config = misc_util.load_hyperconfig_from_filepath(args.hyper_config)
wandb.config.update(config)
wandb.config.update(vars(args))
wandb.config.update(misc_util.load_ldd_from_filepath(args.ldd_config).loss_params)
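# model_creator returns a dict of named modules (it includes 'surface_classifier',
# referenced below); the first entry is the model trained here.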
models_dict = train_util.model_creator(config=config,
device_id=args.device_id)
model = next(iter(models_dict.values()))
if hasattr(model, "multi_gpu") and model.multi_gpu:
model.gpu_0 = torch.device(f"cuda:{args.gpu0}")
model.gpu_1 = torch.device(f"cuda:{args.gpu1}")
if args.resume_pkl:
    pkl = torch.load(args.resume_pkl)
    # Overlay checkpoint weights onto the freshly initialized state dict, so any
    # keys missing from the checkpoint keep their random initialization.
    random_sd = model.state_dict()
    random_sd.update(pkl['model'])
    model.load_state_dict(random_sd)
MODEL_DEVICE = next(model.parameters()).device
optimizer = torch.optim.Adam(model.parameters(),
lr=args.lr)
if args.resume_pkl and args.load_optim:
pkl = torch.load(args.resume_pkl)
optimizer.load_state_dict(pkl['opt'])
num_epochs = 100000  # effectively train until manually stopped
# if args.enable_scheduler:
# scheduler = get_linear_schedule_with_warmup(optimizer, args.num_warmup_steps, args.num_training_steps)
forward_pass_dict = dict()
total_iteration = 0
if args.log_gradients:
wandb.watch(model, log_freq=1)
if args.stats_json:
with open(args.stats_json, "r") as fp:
stats_dic = json.load(fp)
else:
stats_dic = None
below_freeze_threshold_count = 0
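# Train and validation datasets are built with identical preprocessing and
# augmentation settings; only the dataset paths differ.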
train_dset = PybulletPointcloudDataset(args.train_dset_path,
stats_dic=stats_dic,
center_fps_pc=args.center_fps_pc,
linear_search=args.no_linear_search,
threshold_frac=args.threshold_frac,
max_frac_threshold=args.max_frac_threshold,
rot_aug=args.rot_aug,
dont_make_btb=args.dont_make_btb,
randomize_z_canonical=args.randomize_z_canonical,
further_downsample_frac=args.further_downsample_frac,
augment_extrinsics=args.augment_extrinsics,
depth_noise_scale=args.depth_scale,
extrinsics_noise_scale=args.extrinsics_noise_scale)
val_dset = PybulletPointcloudDataset(args.val_dset_path,
stats_dic=stats_dic,
center_fps_pc=args.center_fps_pc,
linear_search=args.no_linear_search,
threshold_frac=args.threshold_frac,
max_frac_threshold=args.max_frac_threshold,
rot_aug=args.rot_aug,
dont_make_btb=args.dont_make_btb,
randomize_z_canonical=args.randomize_z_canonical,
further_downsample_frac=args.further_downsample_frac,
augment_extrinsics=args.augment_extrinsics,
depth_noise_scale=args.depth_scale,
extrinsics_noise_scale=args.extrinsics_noise_scale)
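# Note that the validation loader also shuffles and drops the last partial
# batch, so each validation pass sees a random subset of the data.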
train_dloader = DataLoader(train_dset, pin_memory=True, batch_size=args.batch_size, drop_last=True, shuffle=True,
num_workers=8)
val_dloader = DataLoader(val_dset, pin_memory=True, batch_size=args.batch_size, drop_last=True, shuffle=True,
num_workers=8)
get_loss_and_diag_dict = misc_util.load_ldd_from_filepath(args.ldd_config).get_loss_and_diag_dict
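# Main loop: every --evaluation_freq epochs run a capped validation pass,
# every --checkpoint_freq epochs save model and optimizer state, then train
# on up to MAX_TRAIN_SAMPLES_PER_EPOCH samples.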
for epoch in range(num_epochs):
print(f"ln73 Epoch {epoch}")
if epoch % args.evaluation_freq == 0 and epoch > 0:
model.eval()
for batch_ndx, batch in enumerate(val_dloader):
if batch_ndx * args.batch_size > MAX_VAL_SAMPLES:
break
            print("Validation forward pass")
torch.cuda.empty_cache()
with torch.no_grad():
predictions = model(batch)
val_loss, val_diag_dict = get_loss_and_diag_dict(predictions, batch,
increment_iteration=False
)
wandb_dict = {"val/total_loss": val_loss.data.cpu().numpy(),
"val/total_iteration": total_iteration,
"epoch": epoch}
wandb_dict.update(**{f"val/{k}": v for k, v in val_diag_dict.items()})
# if "cvae_bce_cat" in args.loss_str:
# wandb_dict.update(current_temp=models_dict['surface_classifier'].current_temp,
# )
wandb.log(wandb_dict, step=total_iteration)
model.train()
if epoch % args.checkpoint_freq == 0:
dic = dict(model=model.state_dict(),
opt=optimizer.state_dict(),
)
# if args.enable_scheduler:
# dic['scheduler_get_lr'] = scheduler.get_lr()
        torch.save(dic, os.path.join(wandb.run.dir, f"{wandb.run.name}_checkpoint{epoch}"))
        print("Saved checkpoint to:")
        print(os.path.join(wandb.run.dir, f"{wandb.run.name}_checkpoint{epoch}"))
# for iter_ in range(100):
for batch_ndx, batch in enumerate(train_dloader):
if batch_ndx * args.batch_size > MAX_TRAIN_SAMPLES_PER_EPOCH:
break
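        # Optionally freeze encoder/decoder submodules before each step, per
        # the --freeze_encoder / --freeze_decoder flags.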
if args.freeze_encoder:
freeze(model.pointcloud_encoder)
freeze(model.fc_embedding2z)
if args.freeze_decoder:
freeze(model.pointcloud_decoder)
predictions = model(batch)
optimizer.zero_grad()
loss, diag_dict = get_loss_and_diag_dict(predictions, batch)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
# if args.enable_scheduler:
# scheduler.step()
total_iteration += 1
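        # Assemble scalar losses, diagnostics, and latent histograms for wandb.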
        wandb_dict = dict(total_loss=loss.item(),
                          total_iteration=total_iteration,
                          epoch=epoch)
wandb_dict.update(**diag_dict)
"""
Custom Diagnostics for Various Models
"""
# if "cvae" in args.loss_str:
# if "prior_kld" in config['loss_params']['kl_type']:
# wandb_dict.update(current_temp=model.current_temp)
#
wandb_dict.update(current_temp=models_dict['surface_classifier'].current_temp)
#
# if "maf" in args.loss_str:
# wandb_dict.update(prior_flow_log_prob=
# wandb.Histogram(predictions['prior'][0].data.cpu().numpy()))
#
for k, v in predictions['prior'].items():
wandb_dict[k] = wandb.Histogram(v.data.cpu().numpy())
for k, v in predictions['encoder'].items():
wandb_dict[k] = wandb.Histogram(v.data.cpu().numpy())
for k, v in wandb_dict.items():
if isinstance(v, torch.Tensor):
wandb_dict[k] = v.data.cpu().numpy()
wandb.log(wandb_dict, step=total_iteration)