
Commit

[Improve] Harmless changes according to Pylint (#338)
* solve R1710

* solve W0107

* solve W0613 a little bit

* solve R0201

* solve R1705

* solve W0612

* solve C0325

* solve W0235

* solve R1719

* solve C0200

* solve R1716

* fix

* add pylintrc for convenience

* polish

* update
dreamerlin authored Nov 30, 2020
1 parent 87ee33f commit 81450c8
Showing 41 changed files with 815 additions and 203 deletions.
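
Most of the edits below are mechanical applications of these Pylint suggestions rather than behaviour changes. The most frequent pattern is R1705 (no-else-return), which drops a redundant else after a branch that always returns; the to_tensor refactor in formating.py and the __getitem__ change in base.py below are real instances. A rough, self-contained sketch with a made-up function, not code from this repository:

# Hypothetical illustration of the R1705 (no-else-return) fix.

def describe_before(x):
    if x > 0:
        return 'positive'
    else:  # R1705: the else is redundant, the if branch always returns
        return 'non-positive'

def describe_after(x):
    if x > 0:
        return 'positive'
    return 'non-positive'

assert describe_before(3) == describe_after(3) == 'positive'
assert describe_before(-1) == describe_after(-1) == 'non-positive'
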
622 changes: 622 additions & 0 deletions .pylintrc

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion mmaction/apis/inference.py
@@ -72,7 +72,7 @@ def inference_recognizer(model, video_path, label_path, use_frames=False):
     if osp.isfile(video_path) and use_frames:
         raise RuntimeError(
             f"'{video_path}' is a video file, not a rawframe directory")
-    elif osp.isdir(video_path) and not use_frames:
+    if osp.isdir(video_path) and not use_frames:
         raise RuntimeError(
             f"'{video_path}' is a rawframe directory, not a video file")

30 changes: 15 additions & 15 deletions mmaction/apis/test.py
@@ -125,21 +125,20 @@ def collect_results_cpu(result_part, size, tmpdir=None):
     # collect all parts
     if rank != 0:
         return None
-    else:
-        # load results of all parts from tmp dir
-        part_list = []
-        for i in range(world_size):
-            part_file = osp.join(tmpdir, f'part_{i}.pkl')
-            part_list.append(mmcv.load(part_file))
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        # remove tmp dir
-        shutil.rmtree(tmpdir)
-        return ordered_results
+    # load results of all parts from tmp dir
+    part_list = []
+    for i in range(world_size):
+        part_file = osp.join(tmpdir, f'part_{i}.pkl')
+        part_list.append(mmcv.load(part_file))
+    # sort the results
+    ordered_results = []
+    for res in zip(*part_list):
+        ordered_results.extend(list(res))
+    # the dataloader may pad some samples
+    ordered_results = ordered_results[:size]
+    # remove tmp dir
+    shutil.rmtree(tmpdir)
+    return ordered_results


 def collect_results_gpu(result_part, size):
@@ -185,3 +184,4 @@ def collect_results_gpu(result_part, size):
         # the dataloader may pad some samples
         ordered_results = ordered_results[:size]
         return ordered_results
+    return None
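
The trailing return None added to collect_results_gpu above is the R1710 (inconsistent-return-statements) pattern: once one code path returns a value, Pylint wants every path to return explicitly. A minimal self-contained sketch of the pattern, not taken from this repository:

# Hypothetical illustration of the R1710 (inconsistent-return-statements) fix.

def first_even_before(values):
    for v in values:
        if v % 2 == 0:
            return v
    # falling off the end returns None implicitly; Pylint flags the mix

def first_even_after(values):
    for v in values:
        if v % 2 == 0:
            return v
    return None  # explicit return, consistent with the branch above

assert first_even_after([1, 3, 4]) == 4
assert first_even_after([1, 3, 5]) is None
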
12 changes: 5 additions & 7 deletions mmaction/core/evaluation/accuracy.py
@@ -120,9 +120,8 @@ def mmit_mean_average_precision(scores, labels):
         np.float: The MMIT style mean average precision.
     """
     results = []
-    for i in range(len(scores)):
-        precision, recall, _ = binary_precision_recall_curve(
-            scores[i], labels[i])
+    for score, label in zip(scores, labels):
+        precision, recall, _ = binary_precision_recall_curve(score, label)
         ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
         results.append(ap)
     return np.mean(results)
@@ -144,9 +143,8 @@ def mean_average_precision(scores, labels):
     scores = np.stack(scores).T
     labels = np.stack(labels).T

-    for i in range(len(scores)):
-        precision, recall, _ = binary_precision_recall_curve(
-            scores[i], labels[i])
+    for score, label in zip(scores, labels):
+        precision, recall, _ = binary_precision_recall_curve(score, label)
         ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
         results.append(ap)
     results = [x for x in results if not np.isnan(x)]
@@ -466,7 +464,7 @@ def average_precision_at_temporal_iou(ground_truth,
     for idx, this_pred in enumerate(prediction):

         # Check if there is at least one ground truth in the video.
-        if (this_pred[0] in ground_truth):
+        if this_pred[0] in ground_truth:
             this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
         else:
             fp[:, idx] = 1

3 changes: 2 additions & 1 deletion mmaction/core/evaluation/eval_detection.py
@@ -50,7 +50,8 @@ def __init__(self,
                 f'Fixed threshold for tiou score: {self.tiou_thresholds}')
             print_log(log_msg, logger=self.logger)

-    def _import_ground_truth(self, ground_truth_filename):
+    @staticmethod
+    def _import_ground_truth(ground_truth_filename):
         """Read ground truth file and return the ground truth instances and the
         activity classes.

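
The @staticmethod conversions here, and in several dataset and pipeline files below, address R0201 (no-self-use): a method that never touches self can be declared static. A small self-contained sketch of the pattern, not taken from this repository:

# Hypothetical illustration of the R0201 (no-self-use) fix.

class Parser:
    def split_before(self, line):  # R0201: `self` is never used
        return line.strip().split(',')

    @staticmethod
    def split_after(line):  # same behaviour, no instance state needed
        return line.strip().split(',')

assert Parser().split_before(' a,b ') == ['a', 'b']
assert Parser.split_after(' a,b ') == ['a', 'b']
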
6 changes: 3 additions & 3 deletions mmaction/core/evaluation/eval_hooks.py
@@ -80,7 +80,7 @@ def __init__(self,
                 f'or in {self.less_keys} when rule is None, '
                 f'but got {key_indicator}')

-        if not interval > 0:
+        if interval <= 0:
             raise ValueError(f'interval must be positive, but got {interval}')
         if start is not None and start < 0:
             warnings.warn(
@@ -178,8 +178,8 @@ def evaluate(self, runner, results):
                               'it in config file')
                 return None
             return eval_res[self.key_indicator]
-        else:
-            return None
+
+        return None


 class DistEpochEvalHook(EpochEvalHook):

2 changes: 1 addition & 1 deletion mmaction/core/runner/omnisource_runner.py
@@ -89,7 +89,7 @@ def train(self, data_loaders, **kwargs):
                 continue

             for idx, n_times in enumerate(auxiliary_iter_times):
-                for step in range(n_times):
+                for _ in range(n_times):
                     data_batch = next(self.aux_iters[idx])
                     self.call_hook('before_train_iter')
                     self.run_iter(

6 changes: 4 additions & 2 deletions mmaction/datasets/activitynet_dataset.py
@@ -114,7 +114,8 @@ def _import_ground_truth(self):
             ground_truth[video_id] = np.array(this_video_ground_truths)
         return ground_truth

-    def proposals2json(self, results, show_progress=False):
+    @staticmethod
+    def proposals2json(results, show_progress=False):
         """Convert all proposals to a final dict(json) format.

         Args:
@@ -141,7 +142,8 @@ def proposals2json(self, results, show_progress=False):
                 prog_bar.update()
         return result_dict

-    def _import_proposals(self, results):
+    @staticmethod
+    def _import_proposals(results):
         """Read predictions from results."""
         proposals = {}
         num_proposals = 0

2 changes: 1 addition & 1 deletion mmaction/datasets/audio_dataset.py
@@ -55,7 +55,7 @@ def load_annotations(self):
                 idx += 1
                 # idx for label[s]
                 label = [int(x) for x in line_split[idx:]]
-                assert len(label), f'missing label in line: {line}'
+                assert label, f'missing label in line: {line}'
                 if self.multi_class:
                     assert self.num_classes is not None
                     onehot = torch.zeros(self.num_classes)

2 changes: 1 addition & 1 deletion mmaction/datasets/audio_feature_dataset.py
@@ -56,7 +56,7 @@ def load_annotations(self):
                 idx += 1
                 # idx for label[s]
                 label = [int(x) for x in line_split[idx:]]
-                assert len(label), f'missing label in line: {line}'
+                assert label, f'missing label in line: {line}'
                 if self.multi_class:
                     assert self.num_classes is not None
                     onehot = torch.zeros(self.num_classes)

12 changes: 6 additions & 6 deletions mmaction/datasets/base.py
@@ -87,7 +87,6 @@ def __init__(self,
     @abstractmethod
     def load_annotations(self):
         """Load the annotation according to ann_file into video_infos."""
-        pass

     # json annotations already looks like video_infos, so for each dataset,
     # this func should be the same
@@ -224,7 +223,8 @@ def evaluate(self,

         return eval_results

-    def dump_results(self, results, out):
+    @staticmethod
+    def dump_results(results, out):
         """Dump data to json/yaml/pickle strings or files."""
         return mmcv.dump(results, out)

@@ -241,7 +241,7 @@ def prepare_train_frames(self, idx):

         # prepare tensor in getitem
         # If HVU, type(results['label']) is dict
-        if self.multi_class and type(results['label']) is list:
+        if self.multi_class and isinstance(results['label'], list):
             onehot = torch.zeros(self.num_classes)
             onehot[results['label']] = 1.
             results['label'] = onehot
@@ -261,7 +261,7 @@ def prepare_test_frames(self, idx):

         # prepare tensor in getitem
         # If HVU, type(results['label']) is dict
-        if self.multi_class and type(results['label']) is list:
+        if self.multi_class and isinstance(results['label'], list):
             onehot = torch.zeros(self.num_classes)
             onehot[results['label']] = 1.
             results['label'] = onehot
@@ -276,5 +276,5 @@ def __getitem__(self, idx):
         """Get the sample for either training or testing given index."""
         if self.test_mode:
             return self.prepare_test_frames(idx)
-        else:
-            return self.prepare_train_frames(idx)
+
+        return self.prepare_train_frames(idx)
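
The type(...) is list checks in prepare_train_frames and prepare_test_frames are replaced with isinstance(..., list), the more idiomatic form. One behavioural nuance: isinstance also accepts subclasses, which is usually the intended semantics. A tiny self-contained sketch, not taken from this repository:

# Hypothetical illustration of type() vs isinstance() checks.

class LabelList(list):  # made-up list subclass for illustration
    pass

labels = LabelList([1, 4])

assert type(labels) is not list  # exact-type check rejects the subclass
assert isinstance(labels, list)  # isinstance accepts it
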
2 changes: 1 addition & 1 deletion mmaction/datasets/hvu_dataset.py
@@ -165,7 +165,7 @@ def evaluate(self,
         gt_labels = [ann['label'] for ann in self.video_infos]

         eval_results = {}
-        for i, category in enumerate(self.tag_categories):
+        for category in self.tag_categories:

             start_idx = self.category2startidx[category]
             num = self.category2num[category]

15 changes: 4 additions & 11 deletions mmaction/datasets/pipelines/augmentations.py
@@ -252,10 +252,7 @@ def __init__(self, flip_ratio=0.5, direction='horizontal'):
         self.direction = direction

     def __call__(self, results):
-        if np.random.rand() < self.flip_ratio:
-            flip = True
-        else:
-            flip = False
+        flip = np.random.rand() < self.flip_ratio

         results['flip'] = flip
         results['flip_direction'] = self.direction
@@ -906,10 +903,7 @@ def __call__(self, results):
         if modality == 'Flow':
             assert self.direction == 'horizontal'

-        if np.random.rand() < self.flip_ratio:
-            flip = True
-        else:
-            flip = False
+        flip = np.random.rand() < self.flip_ratio

         results['flip'] = flip
         results['flip_direction'] = self.direction
@@ -993,7 +987,7 @@ def __call__(self, results):
             results['img_norm_cfg'] = dict(
                 mean=self.mean, std=self.std, to_bgr=self.to_bgr)
             return results
-        elif modality == 'Flow':
+        if modality == 'Flow':
             num_imgs = len(results['imgs'])
             assert num_imgs % 2 == 0
             assert self.mean.shape[0] == 2
@@ -1019,8 +1013,7 @@ def __call__(self, results):
                 adjust_magnitude=self.adjust_magnitude)
             results['img_norm_cfg'] = args
             return results
-        else:
-            raise NotImplementedError
+        raise NotImplementedError

     def __repr__(self):
         repr_str = (f'{self.__class__.__name__}('

11 changes: 5 additions & 6 deletions mmaction/datasets/pipelines/formating.py
@@ -16,16 +16,15 @@ def to_tensor(data):
     """
     if isinstance(data, torch.Tensor):
         return data
-    elif isinstance(data, np.ndarray):
+    if isinstance(data, np.ndarray):
         return torch.from_numpy(data)
-    elif isinstance(data, Sequence) and not mmcv.is_str(data):
+    if isinstance(data, Sequence) and not mmcv.is_str(data):
         return torch.tensor(data)
-    elif isinstance(data, int):
+    if isinstance(data, int):
         return torch.LongTensor([data])
-    elif isinstance(data, float):
+    if isinstance(data, float):
         return torch.FloatTensor([data])
-    else:
-        raise TypeError(f'type {type(data)} cannot be converted to tensor.')
+    raise TypeError(f'type {type(data)} cannot be converted to tensor.')


 @PIPELINES.register_module()

21 changes: 14 additions & 7 deletions mmaction/datasets/pipelines/loading.py
@@ -24,6 +24,7 @@ class LoadHVULabel:

     def __init__(self, **kwargs):
         self.hvu_initialized = False
+        self.kwargs = kwargs

     def init_hvu_info(self, categories, category_nums):
         assert len(categories) == len(category_nums)
@@ -502,7 +503,8 @@ def __init__(self,
         self.mode = mode
         self.test_interval = test_interval

-    def _get_train_indices(self, valid_length, num_segments):
+    @staticmethod
+    def _get_train_indices(valid_length, num_segments):
         """Get indices of different stages of proposals in train mode.

         It will calculate the average interval for each segment,
@@ -528,7 +530,8 @@ def _get_train_indices(self, valid_length, num_segments):

         return offsets

-    def _get_val_indices(self, valid_length, num_segments):
+    @staticmethod
+    def _get_val_indices(valid_length, num_segments):
         """Get indices of different stages of proposals in validation mode.

         It will calculate the average interval for each segment.
@@ -1230,10 +1233,12 @@ def __init__(self,
         self.kwargs = kwargs
         self.file_client = None

-    def _zero_pad(self, shape):
+    @staticmethod
+    def _zero_pad(shape):
         return np.zeros(shape, dtype=np.float32)

-    def _random_pad(self, shape):
+    @staticmethod
+    def _random_pad(shape):
         # librosa load raw audio file into a distribution of -1~+1
         return np.random.rand(shape).astype(np.float32) * 2 - 1

@@ -1286,10 +1291,12 @@ def __init__(self, pad_method='zero'):
             raise NotImplementedError
         self.pad_method = pad_method

-    def _zero_pad(self, shape):
+    @staticmethod
+    def _zero_pad(shape):
         return np.zeros(shape, dtype=np.float32)

-    def _random_pad(self, shape):
+    @staticmethod
+    def _random_pad(shape):
         # spectrogram is normalized into a distribution of 0~1
         return np.random.rand(shape).astype(np.float32)

@@ -1387,7 +1394,7 @@ def __call__(self, results):
         # the input should be one single image
         assert len(results['imgs']) == 1
         im = results['imgs'][0]
-        for i in range(1, self.clip_len):
+        for _ in range(1, self.clip_len):
             results['imgs'].append(np.copy(im))
         results['clip_len'] = self.clip_len
         results['num_clips'] = 1

2 changes: 1 addition & 1 deletion mmaction/datasets/rawframe_dataset.py
@@ -138,7 +138,7 @@ def load_annotations(self):
                 idx += 1
                 # idx for label[s]
                 label = [int(x) for x in line_split[idx:]]
-                assert len(label), f'missing label in line: {line}'
+                assert label, f'missing label in line: {line}'
                 if self.multi_class:
                     assert self.num_classes is not None
                     video_info['label'] = label

2 changes: 1 addition & 1 deletion mmaction/datasets/rawvideo_dataset.py
@@ -114,7 +114,7 @@ def sample_clip(self, results):
         """Sample a clip from the raw video given the sampling strategy."""
         assert self.sampling_strategy in ['positive', 'random']
         if self.sampling_strategy == 'positive':
-            assert len(results['positive_clip_inds'])
+            assert results['positive_clip_inds']
             ind = random.choice(results['positive_clip_inds'])
         else:
             ind = random.randint(0, results['num_clips'] - 1)

