#!/usr/bin/env python3
"""Evaluate blob detection against ground-truth CSV annotations.

For every TIFF subvolume in the input folder, detected blobs are compared
with the points listed in the matching CSV file, and per-file and aggregate
precision/recall/F1 scores are written to the output folder.
"""
import argparse
import logging
import os

import coloredlogs
import numpy as np
import pandas as pd
from zetastitcher import InputFile

from blob_utils import blob_detector, compare_points

logger = logging.getLogger(__name__)


def main():
    logging.basicConfig(format='[%(funcName)s] - %(asctime)s - %(message)s', level=logging.INFO)
    coloredlogs.install(level='DEBUG', logger=logger)
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='input folder', metavar='PATH')
    parser.add_argument('-o', '--output', help='output base path', metavar='PATH')
    parser.add_argument('-s1xy', type=float, default=13.0, help="smaller xy sigma")
    parser.add_argument('-s1z', type=float, default=7.0, help="smaller z sigma")
    parser.add_argument('-s2xy', type=float, default=26.0, help="larger xy sigma")
    parser.add_argument('-s2z', type=float, default=14.0, help="larger z sigma")
    parser.add_argument('-t', type=float, default=310.0, help="absolute threshold")
    parser.add_argument('-d', type=float, default=5.0, help="threshold distance")
    args = parser.parse_args()
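    # Example invocation (paths are illustrative, not from the original script):
    #   python blob_eval.py -i /path/to/subvolumes -o /path/to/results
    # Each TIFF in the input folder is expected to have a same-named CSV with
    # ground-truth blob coordinates.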
    logger.info('initializing...')
    lista = os.listdir(args.input)
    lista = [x for x in lista if 'tif' in x and 'csv' not in x]

    # per-file counts and scores
    tp = []
    fp = []
    fn = []
    prec = []
    rec = []
    f1 = []
    for element in lista:
        logger.info('processing file %s', element)
        handle = InputFile(os.path.join(args.input, element))
        image = handle.whole()
        detected = blob_detector(image, args.s1xy, args.s1z, args.s2xy, args.s2z, args.t)

        # load the matching ground-truth CSV (same basename as the TIFF)
        name, ext = os.path.splitext(element)
        true = np.genfromtxt(os.path.join(args.input, name + '.csv'), delimiter=',', skip_header=1)

        if len(true) == 0:
            # no ground-truth points: every detection is a false positive
            local_tp = 0
            local_fp = len(detected)
            local_fn = 0
        else:
            # keep the coordinate columns and swap the first and third axes
            # so the ground truth matches the order of the detected points
            if len(true.shape) > 1:
                true = true[:, 5:8]
                true[:, [0, 2]] = true[:, [2, 0]]
            else:
                true = true[5:8]
                true = np.flip(true)
            ltp, lfp, lfn = compare_points(detected, true, args.d)
            local_tp = len(ltp)
            local_fp = len(lfp)
            local_fn = len(lfn)

        tp.append(local_tp)
        fp.append(local_fp)
        fn.append(local_fn)

        # per-file precision, recall and F1; fall back to 0 when undefined
        try:
            prec.append(local_tp / (local_tp + local_fp))
        except ZeroDivisionError:
            prec.append(0)
        try:
            rec.append(local_tp / (local_tp + local_fn))
        except ZeroDivisionError:
            rec.append(0)
        try:
            f1.append(2 * local_tp / (2 * local_tp + local_fp + local_fn))
        except ZeroDivisionError:
            f1.append(0)
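    # Aggregate scores are reported in two ways: from the pooled TP/FP/FN
    # totals across all subvolumes, and as unweighted means of the per-file
    # scores (written to the summary file below).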
    total_tp = np.sum(tp)
    total_fp = np.sum(fp)
    total_fn = np.sum(fn)
    macro_prec = total_tp / (total_tp + total_fp)
    macro_rec = total_tp / (total_tp + total_fn)
    macro_f1 = 2 * macro_rec * macro_prec / (macro_prec + macro_rec)

    df = pd.DataFrame(zip(lista, tp, fp, fn, prec, rec, f1),
                      columns=['filename', 'true positives', 'false positives',
                               'false negatives', 'precision', 'recall', 'f1-score'])
    if not os.path.exists(args.output):
        os.makedirs(args.output, 0o775)

    pd_file = os.path.join(args.output, 'eval.csv')
    logger.info('writing results...')
    df.to_csv(pd_file, index=False)

    sum_file = os.path.join(args.output, 'summary.csv')
    with open(sum_file, "w") as file:
        file.write("Number of subvolumes: %d\n" % len(lista))
        file.write("Total true positives: %d\n" % total_tp)
        file.write("Total false positives: %d\n" % total_fp)
        file.write("Total false negatives: %d\n" % total_fn)
        file.write("Macro-averaged precision: %02f\n" % macro_prec)
        file.write("Macro-averaged recall: %02f\n" % macro_rec)
        file.write("Macro-averaged f1-score: %02f\n" % macro_f1)
        file.write("Micro-averaged precision: %02f\n" % np.mean(prec))
        file.write("Micro-averaged recall: %02f\n" % np.mean(rec))
        file.write("Micro-averaged f1-score: %02f\n" % np.mean(f1))
if __name__ == "__main__":
    main()