test-config-office-home.yaml
data:
  dataset:
    name: officehome # choices are ['office', 'officehome', 'VisDA+ImageCLEF-DA']
    root_path: /data/yinmingyue/Datasets/OfficeHome # /path/to/dataset/root
    source1: 1 # index of source domain No.1
    source2: 2 # index of source domain No.2
    source3: 3 # index of source domain No.3
    target: 0 # index of the target domain
    n_share1: 4 # number of classes shared between source domain No.1 and the target domain
    n_source_private1: 2 # number of classes private to source domain No.1
    n_share2: 4 # number of classes shared between source domain No.2 and the target domain
    n_source_private2: 2 # number of classes private to source domain No.2
    n_share3: 4 # number of classes shared between source domain No.3 and the target domain
    n_source_private3: 2 # number of classes private to source domain No.3
    n_private_common: 1 # number of classes shared between the private label sets of source domains No.1 and No.2
    n_share_common: 10 # number of classes shared between all source domains and the target domain
    n_total: 65 # total number of classes
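    # Worked tally under the assumed semantics of the fields above (not
    # verified against the data loader): each source domain i would cover the
    # n_share_common classes shared by every source with the target, plus its
    # own n_share_i target-shared classes and n_source_private_i private
    # classes, e.g. 10 + 4 + 2 = 16 classes for source No.1 here; any of the
    # 65 total classes appearing in no source would then be target-private
    # ("unknown") classes.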
  dataloader:
    class_balance: true # whether to class-balance the sampling when loading datasets
    data_workers: 3 # number of workers for the training dataloaders
    batch_size: 36 # batch size for each of the source and target dataloaders
model:
  base_model: resnet50 # backbone feature extractor
  pretrained_model: Pre-trained/resnet50-19c8e357.pth # /path/to/pretrained/model
train:
  min_step: 40000 # minimum number of steps to run; epochs are run until this step count is exceeded
  lr: 0.01 # learning rate for newly added layers; fine-tuned layers use lr / 10
  weight_decay: 0.0005 # weight decay for the SGD optimizer
  momentum: 0.9 # momentum for the SGD optimizer
  continue_training: false # whether to resume training from a saved checkpoint: true / false
  continue_step: 0 # the step from which to resume training
  cut: 0.0 # cut-off threshold used when normalizing weights
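  # Example (checkpoint location assumed, following the resume_file pattern in
  # the test section below): to resume a run from step 20000 one would
  # presumably set
  #   continue_training: true
  #   continue_step: 20000
  # with the matching checkpoint present under the log directory.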
test:
  test_interval: 200 # number of steps between two consecutive test phases
  test_only: true # test a given model and exit
  resume_file: log/ClPrRw-Ar/best.pkl # model to test
  w_0: 0.5 # threshold for separating unknown-class samples from known-class samples
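  # Assumed behaviour of w_0 (not confirmed from this file alone): a target
  # sample whose known-class score falls below w_0 would be rejected as
  # "unknown", while samples scoring above it keep their predicted known class.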
misc:
  gpus: 1 # number of GPUs to use; 0 means CPU only; requires less than 6 GB of GPU memory
  gpu_id: "0" # which GPU to use
  gpu_id_list: [0] # [0, ..., gpus - 1]
log:
  root_dir: log # the log directory (logs are written to {root_dir}/{method}/{time}/)
  log_interval: 10 # number of steps between scalar logging