I'm trying to load a custom dataset that has multiple videos for the same sequence (6 views), and I need to load them together at once. Please find the custom dataloader I defined below.
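For reference, the loader assumes six per-view directories under a common root, each containing the same relative clip path, plus one four-field annotation row per sequence (placeholder names; my real paths are on a cluster):

```
/scratch/xxx/dataset/test/view0_common/<clip>
...
/scratch/xxx/dataset/test/view5_common/<clip>

# {train,val,test}.csv rows:
<clip>,<label>,<gaze>,[<ego_0> <ego_1> ... <ego_k>]
```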
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import random
import numpy as np
import pandas
import slowfast.utils.logging as logging
import torch
import torch.utils.data
from slowfast.utils.env import pathmgr
from torchvision import transforms
from . import (
decoder as decoder,
transform as transform,
utils as utils,
video_container as container,
)
from .build import DATASET_REGISTRY
from .random_erasing import RandomErasing
from .transform import create_random_augment, MaskingGenerator, MaskingGenerator3D
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Kinetics_v2(torch.utils.data.Dataset):
"""
Kinetics video loader. Construct the Kinetics video loader, then sample
clips from the videos. For training and validation, a single clip is
randomly sampled from every video with random cropping, scaling, and
    flipping. For testing, multiple clips are uniformly sampled from every
video with uniform cropping. For uniform cropping, we take the left, center,
and right crop if the width is larger than height, or take top, center, and
bottom crop if the height is larger than the width.
"""
def __init__(self, cfg, mode, num_retries=100):
"""
Construct the Kinetics video loader with a given csv file. The format of
the csv file is:
        ```
        path_to_video_1,label_1,gaze_1,ego_1
        path_to_video_2,label_2,gaze_2,ego_2
        ...
        path_to_video_N,label_N,gaze_N,ego_N
        ```
Args:
cfg (CfgNode): configs.
            mode (string): Options include `train`, `val`, or `test` mode.
For the train and val mode, the data loader will take data
from the train or val set, and sample one clip per video.
For the test mode, the data loader will take data from test set,
and sample multiple clips per video.
num_retries (int): number of retries.
"""
# Only support train, val, and test mode.
assert mode in [
"train",
"val",
"test",
], "Split '{}' not supported for Kinetics".format(mode)
self.mode = mode
self.cfg = cfg
self.p_convert_gray = self.cfg.DATA.COLOR_RND_GRAYSCALE
self.p_convert_dt = self.cfg.DATA.TIME_DIFF_PROB
self._video_meta = {}
self._num_retries = num_retries
self._num_epoch = 0.0
self._num_yielded = 0
self.gaze = None
self.ego = None
self.skip_rows = self.cfg.DATA.SKIP_ROWS
        self.use_chunk_loading = (
            self.mode in ["train"] and self.cfg.DATA.LOADER_CHUNK_SIZE > 0
        )
self.dummy_output = None
# For training or validation mode, one single clip is sampled from every
# video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every
# video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from
# the frames.
if self.mode in ["train", "val"]:
self._num_clips = 1
elif self.mode in ["test"]:
self._num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS
logger.info("Constructing Kinetics {}...".format(mode))
self._construct_loader()
self.aug = False
self.rand_erase = False
self.use_temporal_gradient = False
self.temporal_gradient_rate = 0.0
self.cur_epoch = 0
if self.mode == "train" and self.cfg.AUG.ENABLE:
self.aug = True
if self.cfg.AUG.RE_PROB > 0:
self.rand_erase = True
def _construct_loader(self):
"""
Construct the video loader.
"""
path_to_file = os.path.join(
self.cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(self.mode)
)
        assert pathmgr.exists(path_to_file), "{} not found".format(path_to_file)
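        # One path list per camera view; entry i of every list refers to the
        # same sequence, so the six views stay aligned by index.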
self._path_to_videos_1 = []
self._path_to_videos_2 = []
self._path_to_videos_3 = []
self._path_to_videos_4 = []
self._path_to_videos_5 = []
self._path_to_videos_6 = []
self.gaze = []
self.ego = []
self._labels = []
self._spatial_temporal_idx = []
self.cur_iter = 0
self.chunk_epoch = 0
self.epoch = 0.0
self.skip_rows = self.cfg.DATA.SKIP_ROWS
path_ = None
with pathmgr.open(path_to_file, "r") as f:
if self.use_chunk_loading:
rows = self._get_chunk(f, self.cfg.DATA.LOADER_CHUNK_SIZE)
else:
rows = f.read().splitlines()
for clip_idx, annotation in enumerate(rows):
                fetch_info = annotation.split(",", 3)
                if len(fetch_info) == 4:
                    path, label, gaze, ego = fetch_info
                else:
                    raise RuntimeError(
                        "Failed to parse annotation '{}' in {}.".format(
                            annotation, path_to_file
                        )
                    )
                # Root directory that holds one sub-directory per camera view.
                direc = "/scratch/xxx/dataset/test/"
                view_paths = [
                    os.path.join(direc, "view{}_common".format(i), path)
                    for i in range(6)
                ]
                # Skip sequences for which any of the six views is missing.
                if not all(os.path.exists(p) for p in view_paths):
continue
                # Parse the annotations once per sequence (not once per clip,
                # which would fail on the second pass when `ego` is already a
                # list of tensors).
                ego = [
                    torch.tensor(int(v), dtype=torch.int8)
                    for v in ego.strip("[]").split()
                ]
                gaze = torch.tensor(int(gaze), dtype=torch.int8)
                view_lists = [
                    self._path_to_videos_1,
                    self._path_to_videos_2,
                    self._path_to_videos_3,
                    self._path_to_videos_4,
                    self._path_to_videos_5,
                    self._path_to_videos_6,
                ]
                for idx in range(self._num_clips):
                    for plist, vpath in zip(view_lists, view_paths):
                        plist.append(os.path.join(self.cfg.DATA.PATH_PREFIX, vpath))
                    self.gaze.append(gaze)
                    self.ego.append(ego)
                    self._labels.append(int(label))
                    self._spatial_temporal_idx.append(idx)
                    self._video_meta[clip_idx * self._num_clips + idx] = {}
        assert (
            len(self._path_to_videos_1) > 0
        ), "Failed to load Kinetics split {} from {}".format(
            self.mode, path_to_file
        )
        logger.info(
            "Constructing Kinetics dataloader (size: {}, skip_rows: {}) from {}".format(
                len(self._path_to_videos_1), self.skip_rows, path_to_file
            )
        )
def _set_epoch_num(self, epoch):
self.epoch = epoch
def _get_chunk(self, path_to_file, chunksize):
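        # Read one chunk of LOADER_CHUNK_SIZE rows starting at self.skip_rows;
        # if that fails (e.g. skip_rows points past the end of the file), wrap
        # around by resetting skip_rows and retrying from the top of the file.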
try:
for chunk in pandas.read_csv(
path_to_file,
chunksize=self.cfg.DATA.LOADER_CHUNK_SIZE,
skiprows=self.skip_rows,
):
break
except Exception:
self.skip_rows = 0
return self._get_chunk(path_to_file, chunksize)
else:
return pandas.array(chunk.values.flatten(), dtype="string")
def __getitem__(self, index):
"""
        Given the video index, return the list of frames, label, and video
        index if the video can be fetched and decoded successfully; otherwise
        repeatedly find a random video that can be decoded as a replacement.
        Args:
            index (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
label (int): the label of the current video.
index (int): if the video provided by pytorch sampler can be
decoded, then return the index of the video. If not, return the
index of the video replacement that can be decoded.
"""
short_cycle_idx = None
        # When short cycle is used, input index is a tuple.
if isinstance(index, tuple):
index, self._num_yielded = index
if self.cfg.MULTIGRID.SHORT_CYCLE:
index, short_cycle_idx = index
if self.dummy_output is not None:
return self.dummy_output
if self.mode in ["train", "val"]:
# -1 indicates random sampling.
temporal_sample_index = -1
spatial_sample_index = -1
min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
if short_cycle_idx in [0, 1]:
crop_size = int(
round(
self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx]
* self.cfg.MULTIGRID.DEFAULT_S
)
)
if self.cfg.MULTIGRID.DEFAULT_S > 0:
# Decreasing the scale is equivalent to using a larger "span"
# in a sampling grid.
min_scale = int(
round(float(min_scale) * crop_size / self.cfg.MULTIGRID.DEFAULT_S)
)
elif self.mode in ["test"]:
temporal_sample_index = (
self._spatial_temporal_idx[index] // self.cfg.TEST.NUM_SPATIAL_CROPS
)
# spatial_sample_index is in [0, 1, 2]. Corresponding to left,
# center, or right if width is larger than height, and top, middle,
# or bottom if height is larger than width.
spatial_sample_index = (
(self._spatial_temporal_idx[index] % self.cfg.TEST.NUM_SPATIAL_CROPS)
if self.cfg.TEST.NUM_SPATIAL_CROPS > 1
else 1
)
min_scale, max_scale, crop_size = (
[self.cfg.DATA.TEST_CROP_SIZE] * 3
if self.cfg.TEST.NUM_SPATIAL_CROPS > 1
else [self.cfg.DATA.TRAIN_JITTER_SCALES[0]] * 2
+ [self.cfg.DATA.TEST_CROP_SIZE]
)
# The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expected to be the same.
assert len({min_scale, max_scale}) == 1
else:
raise NotImplementedError("Does not support {} mode".format(self.mode))
num_decode = (
self.cfg.DATA.TRAIN_CROP_NUM_TEMPORAL if self.mode in ["train"] else 1
)
min_scale, max_scale, crop_size = [min_scale], [max_scale], [crop_size]
if len(min_scale) < num_decode:
min_scale += [self.cfg.DATA.TRAIN_JITTER_SCALES[0]] * (
num_decode - len(min_scale)
)
max_scale += [self.cfg.DATA.TRAIN_JITTER_SCALES[1]] * (
num_decode - len(max_scale)
)
crop_size += (
[self.cfg.MULTIGRID.DEFAULT_S] * (num_decode - len(crop_size))
if self.cfg.MULTIGRID.LONG_CYCLE or self.cfg.MULTIGRID.SHORT_CYCLE
else [self.cfg.DATA.TRAIN_CROP_SIZE] * (num_decode - len(crop_size))
)
assert self.mode in ["train", "val"]
        # Try to decode and sample a clip from each of the six views. If any
        # view cannot be decoded, repeatedly pick a random replacement index
        # and start over.
        while True:
            FRAMES = []
            path_list = [
                self._path_to_videos_1,
                self._path_to_videos_2,
                self._path_to_videos_3,
                self._path_to_videos_4,
                self._path_to_videos_5,
                self._path_to_videos_6,
            ]
            for path_ in path_list:
                ################################## LOADING ######################################################
                # Reset per view so a stale container from the previous view
                # never masks a load failure.
                video_container = None
                if isinstance(index, list):
                    index = index[0]
                try:
                    video_container = container.get_video_container(
                        path_[index],
                        self.cfg.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE,
                        self.cfg.DATA.DECODING_BACKEND,
                    )
                except Exception as e:
                    logger.info(
                        "Failed to load video from {} with error {}".format(
                            path_[index], e
                        )
                    )
                    # Select a random replacement and restart the view loop.
                    index = random.randint(0, len(path_) - 1)
                    break
                if video_container is None:
                    logger.warning(
                        "Video container is empty for {}".format(path_[index])
                    )
                    index = random.randint(0, len(path_) - 1)
                    break
################################## LOADING ######################################################
frames_decoded, time_idx_decoded = (
[None] * num_decode,
[None] * num_decode,
)
num_frames = [self.cfg.DATA.NUM_FRAMES]
sampling_rate = utils.get_random_sampling_rate(
self.cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE,
self.cfg.DATA.SAMPLING_RATE,
)
sampling_rate = [sampling_rate]
if len(num_frames) < num_decode:
num_frames.extend(
[num_frames[-1] for i in range(num_decode - len(num_frames))]
)
# base case where keys have same frame-rate as query
sampling_rate.extend(
[sampling_rate[-1] for i in range(num_decode - len(sampling_rate))]
)
elif len(num_frames) > num_decode:
num_frames = num_frames[:num_decode]
sampling_rate = sampling_rate[:num_decode]
if self.mode in ["train"]:
assert len(min_scale) == len(max_scale) == len(crop_size) == num_decode
target_fps = self.cfg.DATA.TARGET_FPS
if self.cfg.DATA.TRAIN_JITTER_FPS > 0.0 and self.mode in ["train"]:
target_fps += random.uniform(0.0, self.cfg.DATA.TRAIN_JITTER_FPS)
################################### DECODING #################################################
                # Decode video. Meta info is used to perform selective decoding.
                try:
frames, time_idx, tdiff = decoder.decode(
video_container,
sampling_rate,
num_frames,
temporal_sample_index,
self.cfg.TEST.NUM_ENSEMBLE_VIEWS,
video_meta=(
self._video_meta[index] if len(self._video_meta) < 5e6 else {}
), # do not cache on huge datasets
target_fps=target_fps,
backend=self.cfg.DATA.DECODING_BACKEND,
use_offset=self.cfg.DATA.USE_OFFSET_SAMPLING,
max_spatial_scale=(
min_scale[0] if all(x == min_scale[0] for x in min_scale) else 0
), # if self.mode in ["test"] else 0,
time_diff_prob=self.p_convert_dt if self.mode in ["train"] else 0.0,
temporally_rnd_clips=True,
min_delta=self.cfg.CONTRASTIVE.DELTA_CLIPS_MIN,
max_delta=self.cfg.CONTRASTIVE.DELTA_CLIPS_MAX,
)
                except Exception:
                    logger.warning(
                        "Could not decode the video {}".format(path_[index])
                    )
                    index = random.randint(0, len(path_) - 1)
                    break
frames_decoded = frames
time_idx_decoded = time_idx
                # If decoding failed (wrong format, video is too short, etc.),
                # select another video.
                if frames_decoded is None or None in frames_decoded:
                    logger.warning(
                        "Decoder returned no frames for {}".format(path_[index])
                    )
                    break
################################### DECODING #################################################
gaze = self.gaze[index]
ego = self.ego[index]
num_aug = (
self.cfg.DATA.TRAIN_CROP_NUM_SPATIAL * self.cfg.AUG.NUM_SAMPLE
if self.mode in ["train"]
else 1
)
num_out = num_aug * num_decode
f_out, time_idx_out = [None] * num_out, [None] * num_out
idx = -1
label = self._labels[index]
for i in range(num_decode):
for _ in range(num_aug):
idx += 1
f_out[idx] = frames_decoded[i].clone()
time_idx_out[idx] = time_idx_decoded[i, :]
f_out[idx] = f_out[idx].float()
f_out[idx] = f_out[idx] / 255.0
if self.mode in ["train"] and self.cfg.DATA.SSL_COLOR_JITTER:
f_out[idx] = transform.color_jitter_video_ssl(
f_out[idx],
bri_con_sat=self.cfg.DATA.SSL_COLOR_BRI_CON_SAT,
hue=self.cfg.DATA.SSL_COLOR_HUE,
p_convert_gray=self.p_convert_gray,
moco_v2_aug=self.cfg.DATA.SSL_MOCOV2_AUG,
gaussan_sigma_min=self.cfg.DATA.SSL_BLUR_SIGMA_MIN,
gaussan_sigma_max=self.cfg.DATA.SSL_BLUR_SIGMA_MAX,
)
if self.aug and self.cfg.AUG.AA_TYPE:
aug_transform = create_random_augment(
input_size=(f_out[idx].size(1), f_out[idx].size(2)),
auto_augment=self.cfg.AUG.AA_TYPE,
interpolation=self.cfg.AUG.INTERPOLATION,
)
# T H W C -> T C H W.
f_out[idx] = f_out[idx].permute(0, 3, 1, 2)
list_img = self._frame_to_list_img(f_out[idx])
list_img = aug_transform(list_img)
f_out[idx] = self._list_img_to_frames(list_img)
f_out[idx] = f_out[idx].permute(0, 2, 3, 1)
# Perform color normalization.
f_out[idx] = utils.tensor_normalize(
f_out[idx], self.cfg.DATA.MEAN, self.cfg.DATA.STD
)
# T H W C -> C T H W.
f_out[idx] = f_out[idx].permute(3, 0, 1, 2)
scl, asp = (
self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE,
self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE,
)
relative_scales = (
None if (self.mode not in ["train"] or len(scl) == 0) else scl
)
relative_aspect = (
None if (self.mode not in ["train"] or len(asp) == 0) else asp
)
f_out[idx] = utils.spatial_sampling(
f_out[idx],
spatial_idx=spatial_sample_index,
min_scale=min_scale[i],
max_scale=max_scale[i],
crop_size=crop_size[i],
#random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,
random_horizontal_flip=False,
inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,
aspect_ratio=relative_aspect,
scale=relative_scales,
motion_shift=(
self.cfg.DATA.TRAIN_JITTER_MOTION_SHIFT
if self.mode in ["train"]
else False
),
)
if self.rand_erase:
erase_transform = RandomErasing(
self.cfg.AUG.RE_PROB,
mode=self.cfg.AUG.RE_MODE,
max_count=self.cfg.AUG.RE_COUNT,
num_splits=self.cfg.AUG.RE_COUNT,
device="cpu",
)
f_out[idx] = erase_transform(
f_out[idx].permute(1, 0, 2, 3)
).permute(1, 0, 2, 3)
f_out[idx] = utils.pack_pathway_output(self.cfg, f_out[idx])
if self.cfg.AUG.GEN_MASK_LOADER:
mask = self._gen_mask()
f_out[idx] = f_out[idx] + [torch.Tensor(), mask]
frames = f_out[0] if num_out == 1 else f_out
time_idx = np.array(time_idx_out)
if (
num_aug * num_decode > 1
and not self.cfg.MODEL.MODEL_NAME == "ContrastiveModel"
):
label = [label] * num_aug * num_decode
gaze = [gaze] * num_aug * num_decode
ego = [ego] * num_aug * num_decode
index = [index] * num_aug * num_decode
if self.cfg.DATA.DUMMY_LOAD:
if self.dummy_output is None:
self.dummy_output = (frames, label, index, time_idx, {})
FRAMES.append(frames)
            if len(FRAMES) == 6:
                # All six views decoded successfully for this index.
                return FRAMES, gaze, torch.FloatTensor(ego), label, index, time_idx, {}
            else:
                # At least one view failed; retry the whole sample with a new
                # random index.
                index = random.randint(0, len(path_) - 1)
                logger.warning("Retrying sample with new random index {}".format(index))
def _gen_mask(self):
if self.cfg.AUG.MASK_TUBE:
num_masking_patches = round(
np.prod(self.cfg.AUG.MASK_WINDOW_SIZE) * self.cfg.AUG.MASK_RATIO
)
min_mask = num_masking_patches // 5
masked_position_generator = MaskingGenerator(
mask_window_size=self.cfg.AUG.MASK_WINDOW_SIZE,
num_masking_patches=num_masking_patches,
max_num_patches=None,
min_num_patches=min_mask,
)
mask = masked_position_generator()
mask = np.tile(mask, (8, 1, 1))
elif self.cfg.AUG.MASK_FRAMES:
mask = np.zeros(shape=self.cfg.AUG.MASK_WINDOW_SIZE, dtype=int)
n_mask = round(self.cfg.AUG.MASK_WINDOW_SIZE[0] * self.cfg.AUG.MASK_RATIO)
mask_t_ind = random.sample(
range(0, self.cfg.AUG.MASK_WINDOW_SIZE[0]), n_mask
)
mask[mask_t_ind, :, :] += 1
else:
num_masking_patches = round(
np.prod(self.cfg.AUG.MASK_WINDOW_SIZE) * self.cfg.AUG.MASK_RATIO
)
max_mask = np.prod(self.cfg.AUG.MASK_WINDOW_SIZE[1:])
min_mask = max_mask // 5
masked_position_generator = MaskingGenerator3D(
mask_window_size=self.cfg.AUG.MASK_WINDOW_SIZE,
num_masking_patches=num_masking_patches,
max_num_patches=max_mask,
min_num_patches=min_mask,
)
mask = masked_position_generator()
return mask
def _frame_to_list_img(self, frames):
img_list = [transforms.ToPILImage()(frames[i]) for i in range(frames.size(0))]
return img_list
def _list_img_to_frames(self, img_list):
img_list = [transforms.ToTensor()(img) for img in img_list]
return torch.stack(img_list)
def __len__(self):
"""
Returns:
(int): the number of videos in the dataset.
"""
return self.num_videos
@property
def num_videos(self):
"""
Returns:
(int): the number of videos in the dataset.
"""
return len(self._path_to_videos_1)
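Each sample from `__getitem__` is a 7-tuple: a list `FRAMES` of six per-view clips (each clip itself a list of pathway tensors from `pack_pathway_output`), plus `gaze`, `ego`, `label`, `index`, `time_idx`, and an empty dict. This is how I sanity-check one batch (a minimal sketch; `cfg` is assumed to be a loaded config, `AUG.NUM_SAMPLE == 1` so each view is a single clip, and the default collate batches each of the six views independently):

```python
dataset = Kinetics_v2(cfg, mode="train")
loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)

frames, gaze, ego, label, index, time_idx, meta = next(iter(loader))
print(len(frames))         # 6 views
print(frames[0][0].shape)  # [B, C, T, H, W] for pathway 0 of view 0
print(label)               # batched labels
```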
After loading, I used the MViTv2_S model to train on this data. I have 250 videos in the training set and 50 in the test set. During training the loss converges, but the confusion matrix shows the model always predicts class 0.
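To confirm the collapse, I count the training-label distribution and the per-class predictions (a minimal sketch; `model` and `test_loader` are assumed to be the trained MViTv2_S and a loader over the 50 test videos, and the forward call may need adapting to however the six views are fed to the model):

```python
import collections
import torch

# 1) If most of the 250 training clips carry label 0 (or a CSV parsing bug
#    maps every label to 0), the loss can converge while the network only
#    ever predicts the majority class.
train_set = Kinetics_v2(cfg, mode="train")
print("train label distribution:",
      dict(collections.Counter(train_set._labels)))

# 2) Count what the trained model actually predicts on the test split.
model.eval()
pred_counts = collections.Counter()
with torch.no_grad():
    for frames, gaze, ego, label, index, time_idx, meta in test_loader:
        logits = model(frames)  # adapt to how the six views are consumed
        pred_counts.update(logits.argmax(dim=-1).tolist())
print("prediction counts per class:", dict(pred_counts))
```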
Please help me navigate this issue, thanks!!