#!/usr/bin/env python
# -*- coding: utf-8 -*-
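"""Entry point for the WOLP-DDPG cache-replacement experiments.

Builds the cache environment, instantiates a Wolpertinger agent, and runs the
baseline or sliding-window train/test loop selected by the command-line
arguments.
"""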
import logging
import os
import warnings

import gym
import numpy as np
from setproctitle import setproctitle as ptitle

import cache_env
import train_test as baseline
import train_test_window as window
from arg_parser import init_parser
from normalized_env import NormalizedEnv

if __name__ == "__main__":
    ptitle('test_wolp')
    warnings.filterwarnings('ignore')
    parser = init_parser('WOLP_DDPG')
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_ids)[1:-1]
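    # Deferred imports: these modules presumably initialize the GPU backend on
    # import, so they are loaded only after CUDA_VISIBLE_DEVICES is restricted.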
    from util import get_output_folder, setup_logger
    from wolp_agent import WolpertingerAgent

    args.save_model_dir = get_output_folder('output', args.env)
    env = cache_env.CacheEnv(args.cache_capacity)
    continuous = False
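    # Probe the action space: a Box space has a non-empty shape, while
    # indexing a Discrete space's shape raises IndexError and routes us to
    # the discrete branch below.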
    try:
        # continuous action space
        nb_states = env.observation_space.shape[0]
        nb_actions = env.action_space.shape[0]
        action_high = env.action_space.high
        action_low = env.action_space.low
        continuous = True
        env = NormalizedEnv(env)
    except IndexError:
        # discrete action space, one dimension
        nb_states = env.observation_space.shape[0]  # state dimension
        nb_actions = 1  # action dimension
        max_actions = env.action_space.n
        continuous = False
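    # Seeding is opt-in: a non-positive args.seed leaves NumPy and the
    # environment unseeded.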
    if args.seed > 0:
        np.random.seed(args.seed)
        env.seed(args.seed)
    if continuous:
        agent_args = {
            'continuous': continuous,
            'max_actions': None,
            'action_low': action_low,
            'action_high': action_high,
            'nb_states': nb_states,
            'nb_actions': nb_actions,
            'args': args,
        }
    else:
        agent_args = {
            'continuous': continuous,
            'max_actions': max_actions,
            'action_low': None,
            'action_high': None,
            'nb_states': nb_states,
            'nb_actions': nb_actions,
            'args': args,
        }
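    # Wolpertinger (Dulac-Arnold et al., 2015) has the actor emit a continuous
    # proto-action and then picks among the k nearest valid actions, which
    # lets DDPG scale to the cache's large discrete action space.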
    agent = WolpertingerAgent(**agent_args)
    # if args.load:
    #     agent.load_weights(args.load_model_dir)
    if args.gpu_ids[0] >= 0 and args.gpu_nums > 0:
        agent.cuda_convert()
    # Set up the logger and record every parsed argument.
    log = {}
    if args.mode == 'train':
        setup_logger('RS_log', r'{}/RS_train_log'.format(args.save_model_dir))
    elif args.mode == 'test':
        setup_logger('RS_log', r'{}/RS_test_log'.format(args.save_model_dir))
    else:
        raise RuntimeError('undefined mode {}'.format(args.mode))
    # getLogger is a singleton lookup: it returns a reference to the same
    # logger object configured by setup_logger above.
    log['RS_log'] = logging.getLogger('RS_log')
    d_args = vars(args)
    for k, v in d_args.items():
        log['RS_log'].info('{0}: {1}'.format(k, v))
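    # Dispatch: the "CacheContent_window" env name selects the sliding-window
    # train/test loops; any other env name uses the baseline loops.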
    if args.mode == 'train':
        if args.env == "CacheContent_window":  # window experiment set
            train_args = {
                'continuous': continuous,
                'env': env,
                'agent': agent,
                'max_episode': args.max_episode,
                'warmup': args.warmup,
                'save_model_dir': args.save_model_dir,
                'max_episode_length': args.max_episode_length,
                'logger': log['RS_log'],
                'WINDOW': args.window_size,
                'zipf_a': args.zipf_a,
                'entire_request_nb': args.entire_request_nb,
                'split_nb': args.split_nb,
                'sub_request_len': args.sub_request_len,
            }
            log['RS_log'].info('window train experiment!')
            window.train(**train_args)
        else:
            train_args = {
                'continuous': continuous,
                'env': env,
                'agent': agent,
                'max_episode': args.max_episode,
                'warmup': args.warmup,
                'save_model_dir': args.save_model_dir,
                'max_episode_length': args.max_episode_length,
                'logger': log['RS_log'],
                'zipf_a': args.zipf_a,
                'entire_request_nb': args.entire_request_nb,
                'split_nb': args.split_nb,
                'sub_request_len': args.sub_request_len,
            }
            log['RS_log'].info('baseline train experiment!')
            baseline.train(**train_args)
    elif args.mode == 'test':
        if args.env == "CacheContent_window":  # window experiment set
            test_args = {
                'env': env,
                'agent': agent,
                'model_path': args.load_model_dir,
                'test_episode': args.test_episode,
                'max_episode_length': args.max_episode_length,
                'logger': log['RS_log'],
                'WINDOW': args.window_size,
                'zipf_a': args.zipf_a,
                'entire_request_nb': args.entire_request_nb,
                'split_nb': args.split_nb,
                'sub_request_len': args.sub_request_len,
            }
            log['RS_log'].info('window test experiment!')
            window.test(**test_args)
        else:
            test_args = {
                'env': env,
                'agent': agent,
                'model_path': args.load_model_dir,
                'test_episode': args.test_episode,
                'max_episode_length': args.max_episode_length,
                'logger': log['RS_log'],
                'zipf_a': args.zipf_a,
                'entire_request_nb': args.entire_request_nb,
                'split_nb': args.split_nb,
                'sub_request_len': args.sub_request_len,
            }
            log['RS_log'].info('baseline test experiment!')
            baseline.test(**test_args)
    else:
        # Unreachable in practice (mode was already validated when the logger
        # was set up); kept as a defensive guard.
        raise RuntimeError('undefined mode {}'.format(args.mode))
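
# Example invocations, as a sketch only: the flag names below are assumed from
# the args.* attributes used above; see arg_parser.init_parser for the
# authoritative definitions.
#   python main.py --mode train --env CacheContent_window --cache_capacity 100
#   python main.py --mode test --env CacheContent --load_model_dir output/...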