coco.py
import os
import os.path as osp
import sys

import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np

from .config import cfg
from pycocotools import mask as maskUtils
from instaboost import get_new_data, InstaBoostConfig


def get_label_map():
    if cfg.dataset.label_map is None:
        return {x + 1: x + 1 for x in range(len(cfg.dataset.class_names))}
    else:
        return cfg.dataset.label_map
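# With the default config (label_map is None) this is just the identity map
# over the class ids; a custom cfg.dataset.label_map can instead remap raw
# COCO category ids (which are non-contiguous) onto contiguous training ids.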
class COCOAnnotationTransform(object):
    """Transforms a COCO annotation into a Tensor of bbox coords and label index.
    Initialized with a dictionary lookup of class names to indexes.
    """

    def __init__(self):
        self.label_map = get_label_map()

    def __call__(self, target, width, height):
        """
        Args:
            target (list): COCO target json annotations as a list of dicts
            height (int): height
            width (int): width
        Returns:
            a list containing lists of bounding boxes [bbox coords, class idx]
        """
        scale = np.array([width, height, width, height])
        res = []
        for obj in target:
            if 'bbox' in obj:
                bbox = obj['bbox']
                label_idx = self.label_map[obj['category_id']] - 1
                # COCO boxes are [x, y, w, h]; convert to normalized [xmin, ymin, xmax, ymax]
                final_box = list(np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]) / scale)
                final_box.append(label_idx)
                res += [final_box]  # [xmin, ymin, xmax, ymax, label_idx]
            else:
                print("No bbox found for object ", obj)
        return res  # [[xmin, ymin, xmax, ymax, label_idx], ...]
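# A minimal usage sketch (illustrative values; assumes the identity label map,
# so category_id 1 becomes class index 0):
#
#   transform = COCOAnnotationTransform()
#   anns = [{'bbox': [10, 20, 30, 40], 'category_id': 1}]
#   transform(anns, width=100, height=100)
#   # -> [[0.1, 0.2, 0.4, 0.6, 0]]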
class COCODetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
    Args:
        image_path (string): Root directory where the images are stored.
        info_file (string): Path to the COCO-style annotation json file.
        transform (callable, optional): A function/transform that augments the
            raw images.
        target_transform (callable, optional): A function/transform that takes
            in the target (bbox) and transforms it.
        has_gt (bool): Whether the set has ground-truth annotations.
        is_train (bool): Whether to apply InstaBoost augmentation in pull_item.
    """

    def __init__(self, image_path, info_file, transform=None,
                 target_transform=COCOAnnotationTransform(),
                 dataset_name='MS COCO', has_gt=True, is_train=True):
        # Do this here because we have too many things named COCO
        from pycocotools.coco import COCO

        self.is_train = is_train
        self.root = image_path
        self.coco = COCO(info_file)

        self.ids = list(self.coco.imgToAnns.keys())
        if len(self.ids) == 0 or not has_gt:
            self.ids = list(self.coco.imgs.keys())

        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name
        self.has_gt = has_gt
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, (target, masks, num_crowds)).
                   target is the object returned by ``coco.loadAnns``.
        """
        im, gt, masks, h, w, num_crowds = self.pull_item(index)
        return im, (gt, masks, num_crowds)

    def __len__(self):
        return len(self.ids)
    def pull_item(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target, masks, height, width, crowd).
                   target is the object returned by ``coco.loadAnns``.
            Note that if no crowd annotations exist, crowd will be None
        """
        img_id = self.ids[index]

        if self.has_gt:
            target = self.coco.imgToAnns[img_id]
            ann_ids = self.coco.getAnnIds(imgIds=img_id)

            # Target has {'segmentation', 'area', 'iscrowd', 'image_id', 'bbox', 'category_id'}
            target = self.coco.loadAnns(ann_ids)
        else:
            target = []

        # The split here is for compatibility with both COCO2014 and 2017 annotations.
        # In 2014, images have the pattern COCO_{train/val}2014_%012d.jpg, while in 2017 it's %012d.jpg.
        # Our script downloads the images as %012d.jpg, so convert accordingly.
        file_name = self.coco.loadImgs(img_id)[0]['file_name']
        if file_name.startswith('COCO'):
            file_name = file_name.split('_')[-1]

        path = osp.join(self.root, file_name)
        assert osp.exists(path), 'Image path does not exist: {}'.format(path)

        img = cv2.imread(path)
        if self.is_train:
            # Apply InstaBoost augmentation to both the annotations and the image.
            target, img = get_new_data(target, img, None, background=None)
        # Separate out crowd annotations. These are annotations that signify a large crowd of
        # objects of said class, where there is no annotation for each individual object. Both
        # during testing and training, consider these crowds as neutral.
        crowd = [x for x in target if ('iscrowd' in x and x['iscrowd'])]
        target = [x for x in target if not ('iscrowd' in x and x['iscrowd'])]
        num_crowds = len(crowd)

        # This is so we ensure that all crowd annotations are at the end of the array
        target += crowd
        height, width, _ = img.shape
        masks = None

        if len(target) > 0:
            # Pool all the masks for this image into one [num_objects, height, width] matrix
            masks = [self.coco.annToMask(obj).reshape(-1) for obj in target]
            masks = np.vstack(masks)
            masks = masks.reshape(-1, height, width)

        if self.target_transform is not None and len(target) > 0:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            if len(target) > 0:
                target = np.array(target)
                img, masks, boxes, labels = self.transform(img, masks, target[:, :4],
                                                           {'num_crowds': num_crowds, 'labels': target[:, 4]})

                # I stored num_crowds in labels so I didn't have to modify the entirety of augmentations
                num_crowds = labels['num_crowds']
                labels = labels['labels']

                target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
            else:
                img, _, _, _ = self.transform(img, np.zeros((1, height, width), dtype=np.float32),
                                              np.array([[0, 0, 1, 1]]),
                                              {'num_crowds': 0, 'labels': np.array([0])})
                masks = None
                target = None

        return torch.from_numpy(img).permute(2, 0, 1), target, masks, height, width, num_crowds
    def pull_image(self, index):
        '''Returns the original image object at index
        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        Argument:
            index (int): index of img to show
        Return:
            cv2 img (BGR ndarray)
        '''
        img_id = self.ids[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index
        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.
        Argument:
            index (int): index of img to get annotation of
        Return:
            list: the raw COCO annotation dicts returned by ``coco.loadAnns``
        '''
        img_id = self.ids[index]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        return self.coco.loadAnns(ann_ids)
    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
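

# A hedged smoke-test sketch, not part of the original training pipeline: the
# paths below are placeholders for a local COCO layout. Because this module
# uses a relative import (from .config import cfg), run it as a module, e.g.
# `python -m data.coco`, rather than directly.
if __name__ == '__main__':
    dataset = COCODetection(image_path='data/coco/images',
                            info_file='data/coco/annotations/instances_val2017.json',
                            transform=None, is_train=False)
    print(dataset)

    # With transform=None, __getitem__ returns the raw (C, H, W) image tensor,
    # the normalized [xmin, ymin, xmax, ymax, label] boxes, and stacked masks.
    img, (gt, masks, num_crowds) = dataset[0]
    print(img.shape, len(gt), num_crowds)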