Sourcery Starbot ⭐ refactored buts101/Detectron #1
base: master
@@ -78,8 +78,8 @@ def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
     # Retrieve the test_net binary path
     binary_dir = envu.get_runtime_dir()
     binary_ext = envu.get_py_bin_ext()
-    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
-    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
+    binary = os.path.join(binary_dir, f'test_net{binary_ext}')
+    assert os.path.exists(binary), f"Binary '{binary}' not found"

     # Run inference in parallel in subprocesses
     outputs = subprocess_utils.process_in_parallel(
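These two edits are the `use-fstring-for-concatenation` and `use-fstring-for-formatting` rules in action. A minimal, self-contained sketch of the equivalence (the directory and extension values below are made up for illustration, not the real `envu` results):

```python
import os

binary_dir = '/tmp'   # illustrative value, not the real envu.get_runtime_dir()
binary_ext = '.py'    # illustrative value, not the real envu.get_py_bin_ext()

# Old style: concatenation and str.format()
old_path = os.path.join(binary_dir, 'test_net' + binary_ext)
old_msg = 'Binary \'{}\' not found'.format(old_path)

# New style: f-strings interpolate the values in place
new_path = os.path.join(binary_dir, f'test_net{binary_ext}')
new_msg = f"Binary '{new_path}' not found"

assert old_path == new_path and old_msg == new_msg  # behaviourally identical
```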
@@ -97,7 +97,7 @@ def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
     save_object(
         dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
     )
-    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
+    logger.info(f'Wrote RPN proposals to {os.path.abspath(rpn_file)}')
     return boxes, scores, ids, rpn_file
@@ -139,7 +139,7 @@ def generate_rpn_on_range(ind_range=None):
     save_object(
         dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
     )
-    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
+    logger.info(f'Wrote RPN proposals to {os.path.abspath(rpn_file)}')
     return boxes, scores, ids, rpn_file

Sourcery comment: Function `generate_rpn_on_range` refactored.
@@ -193,13 +193,15 @@ def im_proposals(model, im):
     k_max = cfg.FPN.RPN_MAX_LEVEL
     k_min = cfg.FPN.RPN_MIN_LEVEL
     rois_names = [
-        core.ScopedName('rpn_rois_fpn' + str(l))
+        core.ScopedName(f'rpn_rois_fpn{str(l)}')
         for l in range(k_min, k_max + 1)
     ]

     score_names = [
-        core.ScopedName('rpn_roi_probs_fpn' + str(l))
+        core.ScopedName(f'rpn_roi_probs_fpn{str(l)}')
         for l in range(k_min, k_max + 1)
     ]

     blobs = workspace.FetchBlobs(rois_names + score_names)
     # Combine predictions across all levels and retain the top scoring
     boxes = np.concatenate(blobs[:len(rois_names)])

Sourcery comment on lines -196 to +204: Function `im_proposals` refactored.
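One nuance worth noting: inside an f-string the interpolated value is converted with `str()` automatically, so the explicit `str(l)` in the new code is redundant (harmless, just noise). A standalone sketch using plain strings in place of `core.ScopedName`, with illustrative FPN levels:

```python
k_min, k_max = 2, 6  # typical RPN FPN levels; illustrative, not read from the cfg

rois_names_old = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
rois_names_new = [f'rpn_rois_fpn{l}' for l in range(k_min, k_max + 1)]  # str() not needed

assert rois_names_old == rois_names_new
print(rois_names_new)  # ['rpn_rois_fpn2', ..., 'rpn_rois_fpn6']
```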
@@ -269,10 +271,8 @@ def _get_image_blob(im):
     im_orig -= cfg.PIXEL_MEANS

     im_shape = im_orig.shape
-    im_size_min = np.min(im_shape[0:2])
-    im_size_max = np.max(im_shape[0:2])
-
-    processed_ims = []
+    im_size_min = np.min(im_shape[:2])
+    im_size_max = np.max(im_shape[:2])

     assert len(cfg.TEST.SCALES) == 1
     target_size = cfg.TEST.SCALES[0]

Sourcery comment on lines -272 to +275: Function `_get_image_blob` refactored.
@@ -284,8 +284,7 @@ def _get_image_blob(im):
     im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                     interpolation=cv2.INTER_LINEAR)
     im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :]
-    processed_ims.append(im)
+    processed_ims = [im]

     # Create a blob to hold the input images
     blob = im_list_to_blob(processed_ims)
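For context on what `im_size_min` and `im_size_max` feed into: the blob is built from a single rescaled image whose shorter side is brought to the test scale, with the longer side capped. A rough sketch of that scale computation, under the assumption that the usual shorter-side/longer-side rule applies (the target and cap values here are invented for the example):

```python
import numpy as np

def compute_im_scale(im_shape, target_size=800, max_size=1333):
    """Scale factor that maps the shorter side to target_size, capped so the
    longer side does not exceed max_size."""
    im_size_min = np.min(im_shape[:2])
    im_size_max = np.max(im_shape[:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    return im_scale

print(compute_im_scale((480, 640, 3)))   # ~1.667
print(compute_im_scale((480, 2000, 3)))  # capped: 1333 / 2000 = 0.6665
```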
@@ -291,10 +291,11 @@ def im_detect_bbox_hflip(model, im, box_proposals=None):
     im_hf = im[:, ::-1, :]
     im_width = im.shape[1]

-    if not cfg.MODEL.FASTER_RCNN:
-        box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width)
-    else:
-        box_proposals_hf = None
+    box_proposals_hf = (
+        None
+        if cfg.MODEL.FASTER_RCNN
+        else box_utils.flip_boxes(box_proposals, im_width)
+    )

     scores_hf, boxes_hf, im_scales = im_detect_bbox(
         model, im_hf, box_proposals_hf

Sourcery comment on lines -294 to +298: Function `im_detect_bbox_hflip` refactored.
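This is the `if`/`else` assignment collapsed into a single conditional expression. A small sketch of the pattern with a stand-in flag and flip helper (neither is the real Detectron config nor `box_utils`):

```python
def flip_boxes_stub(boxes, im_width):
    # Stand-in for box_utils.flip_boxes: mirror the x-coordinates only.
    return [(im_width - x2, y1, im_width - x1, y2) for (x1, y1, x2, y2) in boxes]

def proposals_for_flipped_image(box_proposals, im_width, faster_rcnn):
    # Statement form (before) and expression form (after) are equivalent.
    if not faster_rcnn:
        hf_stmt = flip_boxes_stub(box_proposals, im_width)
    else:
        hf_stmt = None
    hf_expr = (
        None
        if faster_rcnn
        else flip_boxes_stub(box_proposals, im_width)
    )
    assert hf_stmt == hf_expr
    return hf_expr

print(proposals_for_flipped_image([(10, 10, 20, 20)], im_width=100, faster_rcnn=False))
```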
@@ -343,10 +344,11 @@ def im_detect_bbox_aspect_ratio(
     # Compute predictions on the transformed image
     im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)

-    if not cfg.MODEL.FASTER_RCNN:
-        box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio)
-    else:
-        box_proposals_ar = None
+    box_proposals_ar = (
+        None
+        if cfg.MODEL.FASTER_RCNN
+        else box_utils.aspect_ratio(box_proposals, aspect_ratio)
+    )

     if hflip:
         scores_ar, boxes_ar, _ = im_detect_bbox_hflip(

Sourcery comment on lines -346 to +351: Function `im_detect_bbox_aspect_ratio` refactored.
@@ -489,10 +491,7 @@ def im_detect_mask_hflip(model, im, boxes):
     im_scales = im_conv_body_only(model, im_hf)
     masks_hf = im_detect_mask(model, im_scales, boxes_hf)

-    # Invert the predicted soft masks
-    masks_inv = masks_hf[:, :, :, ::-1]
-
-    return masks_inv
+    return masks_hf[:, :, :, ::-1]


 def im_detect_mask_scale(model, im, scale, max_size, boxes, hflip=False):

Sourcery comment on lines -492 to +494: Function `im_detect_mask_hflip` refactored. This removes the following comments (why?): `# Invert the predicted soft masks`.
@@ -527,12 +526,9 @@ def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
     boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)

     if hflip:
-        masks_ar = im_detect_mask_hflip(model, im_ar, boxes_ar)
-    else:
-        im_scales = im_conv_body_only(model, im_ar)
-        masks_ar = im_detect_mask(model, im_scales, boxes_ar)
-
-    return masks_ar
+        return im_detect_mask_hflip(model, im_ar, boxes_ar)
+    im_scales = im_conv_body_only(model, im_ar)
+    return im_detect_mask(model, im_scales, boxes_ar)


 def im_detect_keypoints(model, im_scales, boxes):

Sourcery comment on lines -530 to +531: Function `im_detect_mask_aspect_ratio` refactored.
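Here the refactor is the guard-clause (early-return) pattern: return from the `hflip` branch immediately, so the `else` and the temporary variable disappear. A generic sketch with invented helpers:

```python
def transform_before(use_flip, values):
    # Before: assign in both branches, single return at the end.
    if use_flip:
        result = values[::-1]
    else:
        result = [v * 2 for v in values]
    return result

def transform_after(use_flip, values):
    # After: early return removes the else branch and one level of nesting.
    if use_flip:
        return values[::-1]
    return [v * 2 for v in values]

for flag in (True, False):
    assert transform_before(flag, [1, 2, 3]) == transform_after(flag, [1, 2, 3])
```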
@@ -675,10 +671,7 @@ def im_detect_keypoints_hflip(model, im, boxes):
     im_scales = im_conv_body_only(model, im_hf)
     heatmaps_hf = im_detect_keypoints(model, im_scales, boxes_hf)

-    # Invert the predicted keypoints
-    heatmaps_inv = keypoint_utils.flip_heatmaps(heatmaps_hf)
-
-    return heatmaps_inv
+    return keypoint_utils.flip_heatmaps(heatmaps_hf)


 def im_detect_keypoints_scale(model, im, scale, max_size, boxes, hflip=False):

Sourcery comment on lines -678 to +674: Function `im_detect_keypoints_hflip` refactored. This removes the following comments (why?): `# Invert the predicted keypoints`.
@@ -715,18 +708,17 @@ def im_detect_keypoints_aspect_ratio(
     boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)

     if hflip:
-        heatmaps_ar = im_detect_keypoints_hflip(model, im_ar, boxes_ar)
-    else:
-        im_scales = im_conv_body_only(model, im_ar)
-        heatmaps_ar = im_detect_keypoints(model, im_scales, boxes_ar)
-
-    return heatmaps_ar
+        return im_detect_keypoints_hflip(model, im_ar, boxes_ar)
+    im_scales = im_conv_body_only(model, im_ar)
+    return im_detect_keypoints(model, im_scales, boxes_ar)


 def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
     """Combines heatmaps while taking object sizes into account."""
-    assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
-        'All sets of hms must be tagged with downscaling and upscaling flags'
+    assert (
+        len(hms_ts) == len(ds_ts) == len(us_ts)
+    ), 'All sets of hms must be tagged with downscaling and upscaling flags'

     # Classify objects into small+medium and large based on their box areas
     areas = box_utils.boxes_area(boxes)

Sourcery comment on lines -718 to +713: Function `im_detect_keypoints_aspect_ratio` refactored.
Sourcery comment on lines -728 to +721: Function `combine_heatmaps_size_dep` refactored.
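The rewritten assert leans on Python's comparison chaining: `a == b == c` is evaluated as `a == b and b == c`, so the repeated `len(ds_ts)` can be dropped. A quick self-check with dummy data:

```python
hms_ts = [[0.1, 0.2], [0.3, 0.4]]   # dummy heatmap sets
ds_ts = [True, False]               # dummy downscaling flags
us_ts = [False, True]               # dummy upscaling flags

old_ok = len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts)
new_ok = len(hms_ts) == len(ds_ts) == len(us_ts)
assert old_ok == new_ok

assert (
    len(hms_ts) == len(ds_ts) == len(us_ts)
), 'All sets of hms must be tagged with downscaling and upscaling flags'
```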
@@ -62,18 +62,17 @@ def test_net_on_dataset(multi_gpu=False):
     all_boxes, all_segms, all_keyps = test_net()
     test_timer.toc()
     logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
-    results = task_evaluation.evaluate_all(
+    return task_evaluation.evaluate_all(
         dataset, all_boxes, all_segms, all_keyps, output_dir
     )
-    return results


 def multi_gpu_test_net_on_dataset(num_images, output_dir):
     """Multi-gpu inference on a dataset."""
     binary_dir = envu.get_runtime_dir()
     binary_ext = envu.get_py_bin_ext()
-    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
-    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
+    binary = os.path.join(binary_dir, f'test_net{binary_ext}')
+    assert os.path.exists(binary), f"Binary '{binary}' not found"

     # Run inference in parallel in subprocesses
     # Outputs will be a list of outputs from each subprocess, where the output

Sourcery comment on lines -65 to -68: Function `test_net_on_dataset` refactored.
Sourcery comment: Function `multi_gpu_test_net_on_dataset` refactored.
@@ -104,7 +103,7 @@ def multi_gpu_test_net_on_dataset(num_images, output_dir):
             cfg=cfg_yaml
         ), det_file
     )
-    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
+    logger.info(f'Wrote detections to: {os.path.abspath(det_file)}')

     return all_boxes, all_segms, all_keyps
@@ -208,7 +207,7 @@ def test_net(ind_range=None):
             cfg=cfg_yaml
         ), det_file
     )
-    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
+    logger.info(f'Wrote detections to: {os.path.abspath(det_file)}')

     return all_boxes, all_segms, all_keyps

Sourcery comment on lines -211 to +210: Function `test_net` refactored.
@@ -45,38 +45,39 @@ def convert_coco_stuff_mat(data_dir, out_dir):
         with open(file_list % data_set) as f:
             for img_id, img_name in enumerate(f):
                 img_name = img_name.replace('coco', 'COCO').strip('\n')
-                image = {}
-                mat_file = os.path.join(
-                    data_dir, 'annotations/%s.mat' % img_name)
+                mat_file = os.path.join(data_dir, f'annotations/{img_name}.mat')
                 data = h5py.File(mat_file, 'r')
                 labelMap = data.get('S')
-                if len(categories) == 0:
+                if not categories:
                     labelNames = data.get('names')
-                    for idx, n in enumerate(labelNames):
-                        categories.append(
-                            {"id": idx, "name": ''.join(chr(i) for i in data[
-                                n[0]])})
+                    categories.extend(
+                        {
+                            "id": idx,
+                            "name": ''.join(chr(i) for i in data[n[0]]),
+                        }
+                        for idx, n in enumerate(labelNames)
+                    )
                     ann_dict['categories'] = categories
-                scipy.misc.imsave(
-                    os.path.join(data_dir, img_name + '.png'), labelMap)
-                image['width'] = labelMap.shape[0]
-                image['height'] = labelMap.shape[1]
-                image['file_name'] = img_name
-                image['seg_file_name'] = img_name
-                image['id'] = img_id
+                scipy.misc.imsave(os.path.join(data_dir, f'{img_name}.png'), labelMap)
+                image = {
+                    'width': labelMap.shape[0],
+                    'height': labelMap.shape[1],
+                    'file_name': img_name,
+                    'seg_file_name': img_name,
+                    'id': img_id,
+                }

                 images.append(image)
         ann_dict['images'] = images
-        print("Num images: %s" % len(images))
+        print(f"Num images: {len(images)}")
         with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
             outfile.write(json.dumps(ann_dict))

Sourcery comment on lines -48 to +73: Function `convert_coco_stuff_mat` refactored.
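The inner loop that appended one category dict at a time becomes a single `list.extend` call fed by a generator expression. A self-contained sketch with plain strings standing in for the HDF5 name references:

```python
label_names = ['person', 'bicycle', 'car']  # stand-ins for the names stored in the .mat file

# Loop-and-append form
categories_loop = []
for idx, name in enumerate(label_names):
    categories_loop.append({"id": idx, "name": name})

# extend() with a generator expression builds the same list in one call
categories_ext = []
categories_ext.extend(
    {"id": idx, "name": name}
    for idx, name in enumerate(label_names)
)

assert categories_loop == categories_ext
```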
 # for Cityscapes
 def getLabelID(self, instID):
-    if (instID < 1000):
-        return instID
-    else:
-        return int(instID / 1000)
+    return instID if (instID < 1000) else int(instID / 1000)


 def convert_cityscapes_instance_only(

Sourcery comment on lines -76 to +80: Function `getLabelID` refactored.
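`getLabelID` collapses to a conditional expression with unchanged behaviour. If I read the Cityscapes convention correctly (instance IDs of 1000 or more encode `label_id * 1000 + instance_index`, smaller values are plain label IDs), this is what it computes:

```python
def get_label_id(inst_id):
    # Standalone copy of the refactored getLabelID logic.
    return inst_id if inst_id < 1000 else int(inst_id / 1000)

print(get_label_id(26))     # 26  -> already a plain label id
print(get_label_id(26003))  # 26  -> an instance of label 26
```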
@@ -119,29 +120,28 @@ def convert_cityscapes_instance_only(
     ]

     for data_set, ann_dir in zip(sets, ann_dirs):
-        print('Starting %s' % data_set)
-        ann_dict = {}
+        print(f'Starting {data_set}')
         images = []
         annotations = []
         ann_dir = os.path.join(data_dir, ann_dir)
         for root, _, files in os.walk(ann_dir):
             for filename in files:
                 if filename.endswith(ends_in % data_set.split('_')[0]):
                     if len(images) % 50 == 0:
-                        print("Processed %s images, %s annotations" % (
-                            len(images), len(annotations)))
+                        print(f"Processed {len(images)} images, {len(annotations)} annotations")
                     json_ann = json.load(open(os.path.join(root, filename)))
-                    image = {}
-                    image['id'] = img_id
+                    image = {'id': img_id}
                     img_id += 1

                     image['width'] = json_ann['imgWidth']
                     image['height'] = json_ann['imgHeight']
                     image['file_name'] = filename[:-len(
                         ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
-                    image['seg_file_name'] = filename[:-len(
-                        ends_in % data_set.split('_')[0])] + \
-                        '%s_instanceIds.png' % data_set.split('_')[0]
+                    image['seg_file_name'] = (
+                        filename[: -len(ends_in % data_set.split('_')[0])]
+                        + f"{data_set.split('_')[0]}_instanceIds.png"
+                    )

                     images.append(image)

                     fullname = os.path.join(root, image['seg_file_name'])

Sourcery comment on lines -122 to +123: Function `convert_cityscapes_instance_only` refactored.
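Several hunks in this file replace `image = {}` followed by key-by-key assignment with a dict literal (or a literal seeded with the first key, as in `image = {'id': img_id}`). The two styles produce identical dicts; the literal just states the mapping in one place. A small sketch with made-up values:

```python
img_id, width, height, file_name = 0, 2048, 1024, 'example_leftImg8bit.png'  # invented values

# Incremental construction
image_a = {}
image_a['id'] = img_id
image_a['width'] = width
image_a['height'] = height
image_a['file_name'] = file_name

# Single literal
image_b = {
    'id': img_id,
    'width': width,
    'height': height,
    'file_name': file_name,
}

assert image_a == image_b
```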
@@ -162,8 +162,7 @@ def convert_cityscapes_instance_only(
                             print('Warning: invalid contours.')
                             continue  # skip non-instance categories

-                        ann = {}
-                        ann['id'] = ann_id
+                        ann = {'id': ann_id}
                         ann_id += 1
                         ann['image_id'] = image['id']
                         ann['segmentation'] = obj['contours']
@@ -180,14 +179,14 @@ def convert_cityscapes_instance_only(

                         annotations.append(ann)

-        ann_dict['images'] = images
+        ann_dict = {'images': images}
         categories = [{"id": category_dict[name], "name": name} for name in
                       category_dict]
         ann_dict['categories'] = categories
         ann_dict['annotations'] = annotations
-        print("Num categories: %s" % len(categories))
-        print("Num images: %s" % len(images))
-        print("Num annotations: %s" % len(annotations))
+        print(f"Num categories: {len(categories)}")
+        print(f"Num images: {len(images)}")
+        print(f"Num annotations: {len(annotations)}")
         with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
             outfile.write(json.dumps(ann_dict))
@@ -199,4 +198,4 @@ def convert_cityscapes_instance_only(
     elif args.dataset == "cocostuff":
         convert_coco_stuff_mat(args.datadir, args.outdir)
     else:
-        print("Dataset not supported: %s" % args.dataset)
+        print(f"Dataset not supported: {args.dataset}")

Sourcery comment on lines -202 to +201: Lines refactored.
@@ -40,22 +40,18 @@ def parse_args():
         parser.print_help()
         sys.exit(1)

-    args = parser.parse_args()
-    return args
+    return parser.parse_args()


 def convert_coco_blobs_to_cityscape_blobs(model_dict):
     for k, v in model_dict['blobs'].items():
-        if v.shape[0] == NUM_COCO_CLS or v.shape[0] == 4 * NUM_COCO_CLS:
+        if v.shape[0] in [NUM_COCO_CLS, 4 * NUM_COCO_CLS]:
             coco_blob = model_dict['blobs'][k]
-            print(
-                'Converting COCO blob {} with shape {}'.
-                format(k, coco_blob.shape)
-            )
+            print(f'Converting COCO blob {k} with shape {coco_blob.shape}')
             cs_blob = convert_coco_blob_to_cityscapes_blob(
                 coco_blob, args.convert_func
             )
-            print(' -> converted shape {}'.format(cs_blob.shape))
+            print(f' -> converted shape {cs_blob.shape}')
             model_dict['blobs'][k] = cs_blob

Sourcery comment: Function `parse_args` refactored.
Sourcery comment on lines -49 to +54: Function `convert_coco_blobs_to_cityscape_blobs` refactored.
@@ -64,7 +60,7 @@ def convert_coco_blob_to_cityscapes_blob(coco_blob, convert_func):
     coco_shape = coco_blob.shape
     leading_factor = int(coco_shape[0] / NUM_COCO_CLS)
     tail_shape = list(coco_shape[1:])
-    assert leading_factor == 1 or leading_factor == 4
+    assert leading_factor in {1, 4}

     # Reshape in [num_classes, ...] form for easier manipulations
     coco_blob = coco_blob.reshape([NUM_COCO_CLS, -1] + tail_shape)

Sourcery comment on lines -67 to +63: Function `convert_coco_blob_to_cityscapes_blob` refactored.
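Both shape checks in this file swap an `or`-chain of equalities for a membership test; `in {1, 4}` reads as "one of these values" and scales better if more factors are ever allowed. A quick check, assuming the script's `NUM_COCO_CLS` constant is 81:

```python
NUM_COCO_CLS = 81  # assumed value of the script's constant

for leading_factor in (1, 2, 4, 8):
    assert (leading_factor == 1 or leading_factor == 4) == (leading_factor in {1, 4})

for shape0 in (81, 324, 100):
    assert (shape0 == NUM_COCO_CLS or shape0 == 4 * NUM_COCO_CLS) == \
        (shape0 in [NUM_COCO_CLS, 4 * NUM_COCO_CLS])
```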
@@ -108,5 +104,5 @@ def load_and_convert_coco_model(args):

     with open(args.out_file_name, 'w') as f:
         pickle.dump(weights, f, protocol=pickle.HIGHEST_PROTOCOL)
-    print('Wrote blobs to {}:'.format(args.out_file_name))
+    print(f'Wrote blobs to {args.out_file_name}:')
     print(sorted(weights['blobs'].keys()))

Sourcery comment on lines -111 to +107: Lines refactored.
Sourcery comment: Function `multi_gpu_generate_rpn_on_dataset` refactored with the following changes: use-fstring-for-concatenation, use-fstring-for-formatting.