run.py
#!/usr/bin/env python3
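# Example invocation (a sketch: the experiment id and config path below are
# placeholders; substitute your own experiment id and one of the repo's
# experiment yaml files):
#
#   python run.py --exp_name my_exp --run-type train --exp-config path/to/experiment.yaml
#
# Switch --run-type to "eval" or "inference" for evaluation or prediction runs.
# Extra config overrides can be appended after the named arguments; they are
# captured by the positional `opts` argument and merged into the config.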
import argparse
import random
import os
import numpy as np
import torch
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
import habitat_extensions # noqa: F401
import vlnce_baselines # noqa: F401
from vlnce_baselines.config.default import get_config
# from vlnce_baselines.nonlearning_agents import (
# evaluate_agent,
# nonlearning_inference,
# )


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--exp_name",
        type=str,
        default="test",
        required=True,
        help="experiment id that matches the exp-id in the Notion log",
    )
    parser.add_argument(
        "--run-type",
        choices=["train", "eval", "inference"],
        required=True,
        help="run type of the experiment (train, eval, inference)",
    )
    parser.add_argument(
        "--exp-config",
        type=str,
        required=True,
        help="path to config yaml containing info about experiment",
    )
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options from command line",
    )
    parser.add_argument("--local_rank", type=int, default=0, help="local gpu id")
    args = parser.parse_args()
    run_exp(**vars(args))


def run_exp(exp_name: str, exp_config: str,
            run_type: str, opts=None, local_rank=None) -> None:
    r"""Runs an experiment given a mode and a config.

    Args:
        exp_name: experiment id used to suffix output directories and the log file.
        exp_config: path to config file.
        run_type: "train", "eval", or "inference".
        opts: list of strings of additional config options.
        local_rank: local gpu id for distributed runs.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)
    config.defrost()

    # Suffix every output location with the experiment name so runs do not collide.
    config.TENSORBOARD_DIR += exp_name
    config.CHECKPOINT_FOLDER += exp_name
    if os.path.isdir(config.EVAL_CKPT_PATH_DIR):
        config.EVAL_CKPT_PATH_DIR += exp_name
    config.RESULTS_DIR += exp_name
    config.VIDEO_DIR += exp_name
    # config.TASK_CONFIG.TASK.RXR_INSTRUCTION_SENSOR.max_text_len = config.IL.max_text_len
    config.LOG_FILE = exp_name + '_' + config.LOG_FILE

    # For CMA policies on the R2R task, point the dataset at the preprocessed v1-2 episodes.
    if 'CMA' in config.MODEL.policy_name and 'r2r' in config.BASE_TASK_CONFIG_PATH:
        config.TASK_CONFIG.DATASET.DATA_PATH = 'data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}.json.gz'

    # config.local_rank = int(os.environ['LOCAL_RANK'])
    config.local_rank = local_rank
    config.freeze()

    os.system("mkdir -p data/logs/running_log")
    logger.add_filehandler('data/logs/running_log/' + config.LOG_FILE)

    # Seed python, numpy, and torch with the task seed.
    print("SEED:", config.TASK_CONFIG.SEED)
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = False
    if torch.cuda.is_available():
        torch.set_num_threads(1)

    # if run_type == "eval" and config.EVAL.EVAL_NONLEARNING:
    #     evaluate_agent(config)
    #     return

    # if run_type == "inference" and config.INFERENCE.INFERENCE_NONLEARNING:
    #     nonlearning_inference(config)
    #     return

    # Look up the trainer class registered under TRAINER_NAME and dispatch on run type.
    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)
    # import pdb; pdb.set_trace()

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
    elif run_type == "inference":
        trainer.inference()


if __name__ == "__main__":
    main()