Skip to content

Commit

Permalink
Update Dataset and Support MMDET Training
Browse files Browse the repository at this point in the history
  • Loading branch information
OceanPang committed Sep 8, 2020
1 parent 32a1e42 commit 77904c9
Show file tree
Hide file tree
Showing 17 changed files with 294 additions and 331 deletions.
35 changes: 18 additions & 17 deletions configs/quasi_dense_r50_12e.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@
dict(type='SeqDefaultFormatBundle'),
dict(
type='SeqCollect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_mids'],
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_match_indices'],
ref_prefix='ref'),
]
test_pipeline = [
Expand All @@ -191,27 +191,28 @@
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'img_shape', 'scale_factor', 'flip',
'img_norm_cfg', 'frame_id')),
dict(type='VideoCollect', keys=['img'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=dict(
DET=data_root + 'detection/annotations/train_coco-format.json',
VID=data_root + 'tracking/annotations/train_coco-format.json'),
img_prefix=dict(
DET=data_root + 'detection/images/train/',
VID=data_root + 'tracking/images/train/'),
key_img_sampler=dict(interval=1),
ref_img_sampler=dict(num=1, scope=3, method='uniform'),
pipeline=train_pipeline),
train=[
dict(
type=dataset_type,
ann_file=data_root + 'tracking/annotations/train_coco-format.json',
img_prefix=data_root + 'tracking/images/train/',
key_img_sampler=dict(interval=1),
ref_img_sampler=dict(num_ref_imgs=1, scope=3, method='uniform'),
pipeline=train_pipeline),
dict(
type=dataset_type,
load_as_video=False,
ann_file=data_root +
'detection/annotations/train_coco-format.json',
img_prefix=data_root + 'detection/images/train/',
pipeline=train_pipeline)
],
val=dict(
type=dataset_type,
ann_file=data_root + 'tracking/annotations/val_coco-format.json',
Expand Down
43 changes: 0 additions & 43 deletions qdtrack/apis/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
# collect results from all ranks
if gpu_collect:
raise NotImplementedError
# results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
Expand Down Expand Up @@ -115,47 +114,5 @@ def collect_results_cpu(result_part, size, tmpdir=None):
part_file = mmcv.load(part_file)
for k, v in part_file.items():
part_list[k].extend(v)
# TODO: consider the case for DET
# # sort the results
# ordered_results = []
# for res in zip(*part_list):
# ordered_results.extend(list(res))
# # the dataloader may pad some samples
# ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return part_list


# def collect_results_gpu(result_part, size):
# rank, world_size = get_dist_info()
# # dump result part to tensor with pickle
# part_tensor = torch.tensor(
# bytearray(pickle.dumps(result_part)), dtype=torch.uint8,
# device='cuda')
# # gather all result part tensor shape
# shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
# shape_list = [shape_tensor.clone() for _ in range(world_size)]
# dist.all_gather(shape_list, shape_tensor)
# # padding result part tensor to max length
# shape_max = torch.tensor(shape_list).max()
# part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
# part_send[:shape_tensor[0]] = part_tensor
# part_recv_list = [
# part_tensor.new_zeros(shape_max) for _ in range(world_size)
# ]
# # gather all result part
# dist.all_gather(part_recv_list, part_send)

# if rank == 0:
# part_list = []
# for recv, shape in zip(part_recv_list, shape_list):
# part_list.append(
# pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# # sort the results
# ordered_results = []
# for res in zip(*part_list):
# ordered_results.extend(list(res))
# # the dataloader may pad some samples
# ordered_results = ordered_results[:size]
# return ordered_results
11 changes: 6 additions & 5 deletions qdtrack/apis/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,12 @@
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
OptimizerHook, build_optimizer)
from mmcv.utils import build_from_cfg
from mmdet.core import DistEvalHook, EvalHook, Fp16OptimizerHook
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.utils import get_root_logger
from mmdet.core import Fp16OptimizerHook
from mmdet.datasets import build_dataset

from qdtrack.datasets import build_video_dataloader
from qdtrack.core import DistEvalHook, EvalHook
from qdtrack.datasets import build_dataloader
from qdtrack.utils import get_root_logger


def train_model(model,
Expand Down Expand Up @@ -91,7 +92,7 @@ def train_model(model,
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_video_dataloader(
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
Expand Down
3 changes: 2 additions & 1 deletion qdtrack/core/evaluation/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from .eval_hooks import EvalHook, DistEvalHook
from .mot import eval_mot

__all__ = ['eval_mot']
__all__ = ['eval_mot', 'EvalHook', 'DistEvalHook']
33 changes: 33 additions & 0 deletions qdtrack/core/evaluation/eval_hooks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import os.path as osp

from mmdet.core import DistEvalHook as _DistEvalHook
from mmdet.core import EvalHook as _EvalHook


class EvalHook(_EvalHook):
    """Epoch-based evaluation hook for single-GPU video testing.

    Overrides mmdet's ``EvalHook.after_train_epoch`` so that evaluation runs
    through qdtrack's ``single_gpu_test`` (which handles video datasets)
    instead of mmdet's default test loop.
    """

    def after_train_epoch(self, runner):
        """Evaluate the model after each training epoch, when scheduled."""
        if self.evaluation_flag(runner):
            # Imported lazily to avoid a circular import between
            # qdtrack.apis and qdtrack.core at module load time.
            from qdtrack.apis import single_gpu_test
            outputs = single_gpu_test(runner.model, self.dataloader,
                                      show=False)
            self.evaluate(runner, outputs)


class DistEvalHook(_DistEvalHook):
    """Epoch-based evaluation hook for distributed (multi-GPU) video testing.

    Overrides mmdet's ``DistEvalHook.after_train_epoch`` so that evaluation
    runs through qdtrack's ``multi_gpu_test``. Results are gathered via a
    temporary directory; only rank 0 performs the final evaluation.
    """

    def after_train_epoch(self, runner):
        """Evaluate the model after each training epoch, when scheduled."""
        if not self.evaluation_flag(runner):
            return
        # Imported lazily to avoid a circular import between
        # qdtrack.apis and qdtrack.core at module load time.
        from qdtrack.apis import multi_gpu_test
        # Fall back to a work-dir-local collection directory only when no
        # tmpdir was configured on the hook.
        eval_tmpdir = (osp.join(runner.work_dir, '.eval_hook')
                       if self.tmpdir is None else self.tmpdir)
        outputs = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=eval_tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, outputs)
14 changes: 6 additions & 8 deletions qdtrack/datasets/__init__.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,16 @@
from mmdet.datasets.builder import (DATASETS, PIPELINES, build_dataloader,
build_dataset)
from mmdet.datasets.builder import (DATASETS, PIPELINES, build_dataset)

from .bdd_video_dataset import BDDVideoDataset
from .builder import build_video_dataloader
from .builder import build_dataloader
from .coco_video_dataset import CocoVideoDataset
from .parsers import CocoVID
from .pipelines import (LoadMultiImagesFromFile, SeqCollect,
SeqDefaultFormatBundle, SeqLoadAnnotations,
SeqNormalize, SeqPad, SeqRandomFlip, SeqResize)

__all__ = [
'DATASETS', 'PIPELINES', 'build_dataloader', 'build_video_dataloader',
'build_dataset', 'CocoVID', 'BDDVideoDataset', 'CocoVideoDataset',
'LoadMultiImagesFromFile', 'SeqLoadAnnotations', 'SeqResize',
'SeqNormalize', 'SeqRandomFlip', 'SeqPad', 'SeqDefaultFormatBundle',
'SeqCollect'
'DATASETS', 'PIPELINES', 'build_dataloader', 'build_dataset', 'CocoVID',
'BDDVideoDataset', 'CocoVideoDataset', 'LoadMultiImagesFromFile',
'SeqLoadAnnotations', 'SeqResize', 'SeqNormalize', 'SeqRandomFlip',
'SeqPad', 'SeqDefaultFormatBundle', 'SeqCollect'
]
28 changes: 15 additions & 13 deletions qdtrack/datasets/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,20 @@
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmdet.datasets.samplers import GroupSampler
from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler
from torch.utils.data import DataLoader

from .samplers import DistributedVideoSampler


def build_video_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=False,
seed=None,
**kwargs):
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
Expand All @@ -38,12 +38,14 @@ def build_video_dataloader(dataset,
Returns:
DataLoader: A PyTorch dataloader.
"""
if shuffle:
raise ValueError('This dataloader is specifically for video testing.')
rank, world_size = get_dist_info()
if dist:
sampler = DistributedVideoSampler(
dataset, world_size, rank, shuffle=False)
if shuffle:
sampler = DistributedGroupSampler(dataset, samples_per_gpu,
world_size, rank)
else:
sampler = DistributedVideoSampler(
dataset, world_size, rank, shuffle=False)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
Expand Down
Loading

0 comments on commit 77904c9

Please sign in to comment.