import glob
import os
from collections import OrderedDict

import cv2
import numpy as np
import torch.utils.data as data
rng = np.random.RandomState(2020)


def np_load_frame(filename, resize_height, resize_width):
    """
    Load an image from disk and convert it to a numpy.ndarray. Note that the
    color channels are BGR (the OpenCV default) and the pixel values are
    normalized from [0, 255] to [-1, 1].

    :param filename: the full path of the image
    :param resize_height: resized height
    :param resize_width: resized width
    :return: numpy.ndarray of shape (resize_height, resize_width, 3), float32
    """
    image_decoded = cv2.imread(filename)
    if image_decoded is None:
        raise FileNotFoundError('Could not read image: {}'.format(filename))
    image_resized = cv2.resize(image_decoded, (resize_width, resize_height))
    image_resized = image_resized.astype(dtype=np.float32)
    image_resized = (image_resized / 127.5) - 1.0
    return image_resized
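
# Hedged usage sketch (not in the original file): undoing the [-1, 1]
# normalization for visualization. The path 'frames/000.jpg' is hypothetical.
#   frame = np_load_frame('frames/000.jpg', 256, 256)  # float32, BGR, [-1, 1]
#   bgr = ((frame + 1.0) * 127.5).astype(np.uint8)     # back to uint8 [0, 255]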


class DataLoader(data.Dataset):
    def __init__(self, video_folder, transform, resize_height, resize_width, time_step=4, num_pred=1):
        self.dir = video_folder
        self.transform = transform
        self.videos = OrderedDict()
        self._resize_height = resize_height
        self._resize_width = resize_width
        self._time_step = time_step
        self._num_pred = num_pred
        self.setup()
        self.samples = self.get_all_samples()

    def setup(self):
        # Index every video folder under self.dir and collect its sorted
        # .jpg frames.
        videos = glob.glob(os.path.join(self.dir, '*'))
        for video in sorted(videos):
            video_name = os.path.basename(video)
            self.videos[video_name] = {}
            self.videos[video_name]['path'] = video
            self.videos[video_name]['frame'] = glob.glob(
                os.path.join(video, '*.jpg'))
            self.videos[video_name]['frame'].sort()
            self.videos[video_name]['length'] = len(
                self.videos[video_name]['frame'])

    def get_all_samples(self):
        # Every sample is the first frame of a sliding window covering
        # time_step input frames plus num_pred target frames, so the last
        # valid start index is length - time_step - num_pred.
        frames = []
        for video_name in self.videos:
            length = self.videos[video_name]['length']
            for i in range(length - self._time_step - self._num_pred + 1):
                frames.append(self.videos[video_name]['frame'][i])
        return frames

    def __getitem__(self, index):
        # Recover the video name and frame number from the sample path;
        # frames are assumed to be numbered sequentially from zero, so the
        # number doubles as an index into the sorted frame list.
        path = self.samples[index]
        video_name = os.path.basename(os.path.dirname(path))
        frame_name = int(os.path.splitext(os.path.basename(path))[0])

        batch = []
        for i in range(self._time_step + self._num_pred):
            image = np_load_frame(
                self.videos[video_name]['frame'][frame_name + i],
                self._resize_height, self._resize_width)
            if self.transform is not None:
                image = self.transform(image)
            batch.append(image)
        return np.concatenate(batch, axis=0)

    def __len__(self):
        return len(self.samples)
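

if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original file):
    # 'dataset/training/frames' is a hypothetical root whose subfolders each
    # hold one video's .jpg frames numbered from zero. Note that this module's
    # DataLoader class shadows torch.utils.data.DataLoader, hence the
    # fully-qualified name below.
    import torch
    from torchvision import transforms

    dataset = DataLoader('dataset/training/frames',
                         transforms.Compose([transforms.ToTensor()]),
                         resize_height=256, resize_width=256,
                         time_step=4, num_pred=1)
    # Each sample concatenates time_step + num_pred frames along the channel
    # axis: ToTensor yields (3, 256, 256) per frame, so a sample is
    # (3 * 5, 256, 256) = (15, 256, 256).
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
    frames = next(iter(loader))
    print(frames.shape)  # torch.Size([4, 15, 256, 256])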