changes #5

Open · wants to merge 5 commits into base: main
13 changes: 13 additions & 0 deletions cocdownload.py
@@ -0,0 +1,13 @@
import fiftyone as fo
import fiftyone.zoo as foz

# Load up to 1000 shuffled samples with segmentation labels from the COCO-2014 zoo dataset
dataset = foz.load_zoo_dataset(
    "coco-2014",
    splits=["validation", "test"],
    label_types=["segmentations"],
    max_samples=1000,
    shuffle=True,
)
session = fo.launch_app(dataset)
session.dataset = dataset
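
Note: when this script is run outside a notebook, the process may exit right after launching the App; FiftyOne sessions expose wait() to keep it open. A small optional addition, not part of this diff:

session.wait()  # block until the App is closed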
1 change: 1 addition & 0 deletions coco_splits/split_-1.json
@@ -0,0 +1 @@
{"val": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80]}
1 change: 1 addition & 0 deletions coco_splits/split_0.json
@@ -0,0 +1 @@
{"val": [1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69, 73, 77], "train": [2, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 22, 23, 24, 26, 27, 28, 30, 31, 32, 34, 35, 36, 38, 39, 40, 42, 43, 44, 46, 47, 48, 50, 51, 52, 54, 55, 56, 58, 59, 60, 62, 63, 64, 66, 67, 68, 70, 71, 72, 74, 75, 76, 78, 79, 80]}
1 change: 1 addition & 0 deletions coco_splits/split_1.json
@@ -0,0 +1 @@
{"val": [2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62, 66, 70, 74, 78], "train": [1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 24, 25, 27, 28, 29, 31, 32, 33, 35, 36, 37, 39, 40, 41, 43, 44, 45, 47, 48, 49, 51, 52, 53, 55, 56, 57, 59, 60, 61, 63, 64, 65, 67, 68, 69, 71, 72, 73, 75, 76, 77, 79, 80]}
1 change: 1 addition & 0 deletions coco_splits/split_2.json
@@ -0,0 +1 @@
{"val": [3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63, 67, 71, 75, 79], "train": [1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 50, 52, 53, 54, 56, 57, 58, 60, 61, 62, 64, 65, 66, 68, 69, 70, 72, 73, 74, 76, 77, 78, 80]}
1 change: 1 addition & 0 deletions coco_splits/split_3.json
@@ -0,0 +1 @@
{"val": [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80], "train": [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 21, 22, 23, 25, 26, 27, 29, 30, 31, 33, 34, 35, 37, 38, 39, 41, 42, 43, 45, 46, 47, 49, 50, 51, 53, 54, 55, 57, 58, 59, 61, 62, 63, 65, 66, 67, 69, 70, 71, 73, 74, 75, 77, 78, 79]}
147 changes: 147 additions & 0 deletions config/H_48_D_4_proto.json
@@ -0,0 +1,147 @@
{
  "dataset": "coco",
  "method": "fcn_segmentor",
  "data": {
    "image_tool": "cv2",
    "input_mode": "BGR",
    "num_classes": 19,
    "label_list": [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33],
    "data_dir": "/teamspace/studios/this_studio/lessdata",
    "workers": 1
  },
  "train": {
    "batch_size": 1,
    "data_transformer": {
      "size_mode": "fix_size",
      "input_size": [1024, 512],
      "align_method": "only_pad",
      "pad_mode": "random"
    }
  },
  "val": {
    "batch_size": 1,
    "mode": "ss_test",
    "data_transformer": {
      "size_mode": "fix_size",
      "input_size": [2048, 1024],
      "align_method": "only_pad"
    }
  },
  "test": {
    "batch_size": 1,
    "mode": "ss_test",
    "out_dir": "/msravcshare/dataset/seg_result/cityscapes",
    "data_transformer": {
      "size_mode": "fix_size",
      "input_size": [2048, 1024],
      "align_method": "only_pad"
    }
  },
  "train_trans": {
    "trans_seq": ["random_resize", "random_crop", "random_hflip", "random_brightness"],
    "random_brightness": {
      "ratio": 1.0,
      "shift_value": 10
    },
    "random_hflip": {
      "ratio": 0.5,
      "swap_pair": []
    },
    "random_resize": {
      "ratio": 1.0,
      "method": "random",
      "scale_range": [0.5, 2.0],
      "aspect_range": [0.9, 1.1]
    },
    "random_crop": {
      "ratio": 1.0,
      "crop_size": [1024, 512],
      "method": "random",
      "allow_outside_center": false
    }
  },
  "val_trans": {
    "trans_seq": []
  },
  "normalize": {
    "div_value": 255.0,
    "mean_value": [0.485, 0.456, 0.406],
    "mean": [0.485, 0.456, 0.406],
    "std": [0.229, 0.224, 0.225]
  },
  "checkpoints": {
    "checkpoints_name": "fs_baseocnet_cityscapes_seg",
    "checkpoints_dir": "./checkpoints/cityscapes",
    "save_iters": 1000
  },
  "network": {
    "backbone": "deepbase_resnet101_dilated8",
    "multi_grid": [1, 1, 1],
    "model_name": "base_ocnet",
    "bn_type": "torchsyncbn",
    "stride": 8,
    "factors": [[8, 8]],
    "loss_weights": {
      "corr_loss": 0.01,
      "aux_loss": 0.4,
      "seg_loss": 1.0
    }
  },
  "logging": {
    "logfile_level": "info",
    "stdout_level": "info",
    "log_file": "./log/cityscapes/fs_baseocnet_cityscapes_seg.log",
    "log_format": "%(asctime)s %(levelname)-7s %(message)s",
    "rewrite": true
  },
  "lr": {
    "base_lr": 0.01,
    "metric": "iters",
    "lr_policy": "lambda_poly",
    "step": {
      "gamma": 0.5,
      "step_size": 100
    }
  },
  "solver": {
    "display_iter": 10,
    "test_interval": 2000,
    "max_iters": 40000
  },
  "optim": {
    "optim_method": "sgd",
    "adam": {
      "betas": [0.9, 0.999],
      "eps": 1e-08,
      "weight_decay": 0.0001
    },
    "sgd": {
      "weight_decay": 0.0005,
      "momentum": 0.9,
      "nesterov": false
    }
  },
  "loss": {
    "loss_type": "pixel_prototype_ce_loss",
    "params": {
      "ce_weight": [0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754,
                    1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037,
                    1.0865, 1.0955, 1.0865, 1.1529, 1.0507],
      "ce_reduction": "mean",
      "ce_ignore_index": -1,
      "ohem_minkeep": 100000,
      "ohem_thresh": 0.9
    }
  },
  "protoseg": {
    "gamma": 0.999,
    "loss_ppc_weight": 0.01,
    "loss_ppd_weight": 0.001,
    "num_prototype": 10,
    "pretrain_prototype": false,
    "use_rmi": false,
    "use_prototype": true,
    "update_prototype": true,
    "warmup_iters": 0
  }
}
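
Note: a minimal sketch of how the loss_weights block above could be combined into a total training loss, assuming the usual weighted-sum convention (the training code itself is not part of this diff):

import json

with open("config/H_48_D_4_proto.json") as f:
    cfg = json.load(f)

weights = cfg["network"]["loss_weights"]  # {"corr_loss": 0.01, "aux_loss": 0.4, "seg_loss": 1.0}

def total_loss(seg_loss, aux_loss, corr_loss):
    # weighted sum of the individual terms, using the factors from the config
    return (weights["seg_loss"] * seg_loss
            + weights["aux_loss"] * aux_loss
            + weights["corr_loss"] * corr_loss)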
12 changes: 6 additions & 6 deletions config/coco.yaml
@@ -5,7 +5,7 @@ DATA:
  val_list: lists/coco/val.txt
  split: 0
  use_split_coco: True
  workers: 3
  workers: 1
  image_size: 417
  mean: [0.485, 0.456, 0.406]
  std: [0.229, 0.224, 0.225]
@@ -24,10 +24,10 @@ EVALUATION:
  ckpt_path: model_ckpt/
  load_model_id: 1
  ckpt_used: model
  test_num: 10000
  shot: 5
  batch_size_val: 50
  n_runs: 5
  test_num: 5
  shot: 2
  batch_size_val: 1
  n_runs: 2
  support_only_one_novel: True
  use_training_images_for_supports: False
  generate_new_support_set_for_each_task: False
@@ -41,4 +41,4 @@ CLASSIFIER:
  cls_lr: 0.00125
  pi_estimation_strategy: self
  pi_update_at: [10]
  fine_tune_base_classifier: True
  fine_tune_base_classifier: True
66 changes: 66 additions & 0 deletions config/coco_resnet_base.yaml
@@ -0,0 +1,66 @@
Data:
  data_root: ../data/coco
  train_list: ./lists/coco/train.txt
  val_list: ./lists/coco/val.txt
  classes: 61


Train:
  # Aug
  train_h: 417
  train_w: 417
  val_size: 417
  scale_min: 0.5 # minimum random scale
  scale_max: 2.0 # maximum random scale
  rotate_min: -10 # minimum random rotate
  rotate_max: 10 # maximum random rotate
  ignore_label: 255
  padding_label: 255
  # Dataset & Mode
  split: 0
  data_set: 'coco'
  use_split_coco: True # True means FWB setting
  # Optimizer
  batch_size: 3 # batch size for training (bs12 for 1GPU)
  base_lr: 2.5e-4
  epochs: 20
  start_epoch: 0
  stop_interval: 75 # stop when the best result is not updated for "stop_interval" epochs
  index_split: -1 # index for determining the params group with 10x learning rate
  power: 0.9 # 0 means no decay
  momentum: 0.9
  weight_decay: 0.0001
  warmup: False
  # Viz & Save & Resume
  print_freq: 10
  save_freq: 5
  resume: # path to latest checkpoint (default: none, such as epoch_10.pth)
  # Validate
  evaluate: True
  fix_random_seed_val: True
  batch_size_val: 4
  resized_val: True
  ori_resize: False # use original label for evaluation
  # Else
  workers: 8 # 8 data loader workers
  manual_seed: 321
  seed_deterministic: False
  zoom_factor: 8 # zoom factor for final prediction during training, be in [1, 2, 4, 8]

Method:
  layers: 50
  vgg: False



## deprecated multi-processing training
# Distributed:
# dist_url: tcp://127.0.0.1:6789
# dist_backend: 'nccl'
# multiprocessing_distributed: False
# world_size: 1
# rank: 0
# use_apex: False
# opt_level: 'O0'
# keep_batchnorm_fp32:
# loss_scale:
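
Note: the power value above suggests polynomial learning-rate decay. A short sketch of the standard "poly" schedule, assuming this repo follows the common formula (the schedule code is not shown in this diff):

def poly_lr(base_lr, cur_iter, max_iter, power=0.9):
    # decay from base_lr toward 0 as training progresses
    return base_lr * (1 - cur_iter / max_iter) ** power

# e.g. with base_lr 2.5e-4 halfway through training:
# poly_lr(2.5e-4, 5000, 10000) -> ~1.34e-4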
3 changes: 2 additions & 1 deletion data/coco/create_masks.py
@@ -5,7 +5,8 @@
from pycocotools.coco import COCO

for dataset in ['train2014', 'val2014']:
    annFile = os.path.join('annotations', f'instances_{dataset}.json')
    annFile = os.path.join('coco-2014/raw', f'instances_{dataset}.json')
    print(f'PATH...... {annFile}')
    img_dir = dataset
    save_dir = 'train' if 'train' in dataset else 'val'

9 changes: 9 additions & 0 deletions downloadCoco.py
@@ -0,0 +1,9 @@
import fiftyone as fo
import fiftyone.zoo as foz

# Download the COCO-2014 train/validation/test splits with detection and segmentation labels (all classes; a class-filtered variant is sketched below)
dataset = foz.load_zoo_dataset(
    "coco-2014",
    splits=["train", "validation", "test"],
    label_types=["detections", "segmentations"],
)
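
Note: to restrict the download to particular categories, the FiftyOne COCO loader accepts a classes argument; a minimal variant, with the class names chosen only for illustration:

dataset = foz.load_zoo_dataset(
    "coco-2014",
    splits=["train", "validation"],
    label_types=["detections", "segmentations"],
    classes=["person", "car"],  # keep only samples containing these classes
)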
49 changes: 49 additions & 0 deletions model/ASPP.py
@@ -0,0 +1,49 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data

# Atrous Spatial Pyramid Pooling (ASPP) head: an image-level pooling branch plus
# 1x1 and dilated 3x3 branches (rates 6, 12, 18), concatenated along the channel dim.
class ASPP(nn.Module):
    def __init__(self, out_channels=256):
        super(ASPP, self).__init__()
        # image-level branch: applied to the globally pooled feature
        self.layer6_0 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=True),
            nn.ReLU(),
        )
        # 1x1 branch
        self.layer6_1 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=True),
            nn.ReLU(),
        )
        # dilated 3x3 branches; padding equals the dilation so spatial size is preserved
        self.layer6_2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=6, dilation=6, bias=True),
            nn.ReLU(),
        )
        self.layer6_3 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=12, dilation=12, bias=True),
            nn.ReLU(),
        )
        self.layer6_4 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=18, dilation=18, bias=True),
            nn.ReLU(),
        )

        self._init_weight()

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        feature_size = x.shape[-2:]
        # global average pool to 1x1, transform, then broadcast back to the input size
        global_feature = F.avg_pool2d(x, kernel_size=feature_size)
        global_feature = self.layer6_0(global_feature)
        global_feature = global_feature.expand(-1, -1, feature_size[0], feature_size[1])
        # concatenate the pooled branch with the four parallel conv branches
        out = torch.cat(
            [global_feature, self.layer6_1(x), self.layer6_2(x), self.layer6_3(x), self.layer6_4(x)], dim=1)
        return out
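
Note: a minimal usage sketch for the ASPP module above; the input is assumed to be a backbone feature map that already has out_channels channels (the shapes follow from the code, they are not part of this diff):

import torch
from model.ASPP import ASPP

aspp = ASPP(out_channels=256)
x = torch.randn(1, 256, 53, 53)  # e.g. a ResNet feature map
out = aspp(x)                    # five branches concatenated along channels
print(out.shape)                 # torch.Size([1, 1280, 53, 53])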