
1. sklearn.metrics.confusion_matrix(y_true, y_pred, labels=None, sample_weight=None)
y_true is the ground-truth label array and y_pred is the prediction (after softmax, take the argmax over classes, i.e. the predicted class of each pixel). Both y_true and y_pred must be flattened into 1-D arrays (a flattening sketch follows the examples below).
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
       [0, 0, 1],
       [1, 0, 2]])
or
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
       [0, 0, 1],
       [1, 0, 2]])
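For semantic segmentation the same call works on flattened per-pixel labels. Below is a minimal sketch of that flattening step, assuming a 4-class problem with NumPy arrays for the per-pixel scores and the ground-truth mask; the shapes and variable names here are illustrative only, not taken from the original code:

import numpy as np
from sklearn.metrics import confusion_matrix

num_classes = 4
scores = np.random.rand(num_classes, 32, 32)            # per-pixel class scores, shape (C, H, W)
gt_mask = np.random.randint(0, num_classes, (32, 32))   # ground-truth mask, shape (H, W)

pred_mask = scores.argmax(axis=0)                        # predicted class per pixel, shape (H, W)
conf = confusion_matrix(y_true=gt_mask.flatten(),        # both inputs flattened to 1-D
                        y_pred=pred_mask.flatten(),
                        labels=list(range(num_classes)))
print(conf.shape)  # (4, 4)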
2. Implementation in PyTorch
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix

def compute_results(conf_total):
    n_class = conf_total.shape[0]
    consider_unlabeled = True  # the unlabeled class must be considered, so keep this True
    if consider_unlabeled is True:
        start_index = 0
    else:
        start_index = 1
    precision_per_class = np.zeros(n_class)
    recall_per_class = np.zeros(n_class)
    iou_per_class = np.zeros(n_class)
    f1_score_per_class = np.zeros(n_class)
    accuracy = 0.0
    for cid in range(start_index, n_class):  # cid: class id
        accuracy += float(conf_total[cid, cid])
        if conf_total[start_index:, cid].sum() == 0:
            precision_per_class[cid] = np.nan
        else:
            precision_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[start_index:, cid].sum())  # precision = TP / (TP + FP)
        if conf_total[cid, start_index:].sum() == 0:
            recall_per_class[cid] = np.nan
        else:
            recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, start_index:].sum())  # recall = TP / (TP + FN)
        if (conf_total[cid, start_index:].sum() + conf_total[start_index:, cid].sum() - conf_total[cid, cid]) == 0:
            iou_per_class[cid] = np.nan
        else:
            iou_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, start_index:].sum() + conf_total[start_index:, cid].sum() - conf_total[cid, cid])  # IoU = TP / (TP + FP + FN)
        if (precision_per_class[cid] + recall_per_class[cid]) == 0:  # guard against division by zero when precision and recall are both 0
            f1_score_per_class[cid] = np.nan
        else:
            f1_score_per_class[cid] = float(2 * precision_per_class[cid] * recall_per_class[cid]) / float(precision_per_class[cid] + recall_per_class[cid])
    accuracy = accuracy / float(np.sum(conf_total))
    return accuracy, precision_per_class, recall_per_class, iou_per_class, f1_score_per_class
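As a quick sanity check, compute_results can be run on a small hand-built confusion matrix (rows are ground truth, columns are predictions). The 3-class matrix below is made up purely for illustration:

toy_conf = np.array([[5, 1, 0],
                     [2, 3, 1],
                     [0, 0, 4]])
acc, prec, rec, iou, f1 = compute_results(toy_conf)
print(acc)              # (5 + 3 + 4) / 16 = 0.75
print(iou)              # [5/8, 3/7, 4/5] = [0.625, 0.4286, 0.8]
print(np.nanmean(iou))  # mean IoU, ignoring any class marked NaN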
def val(model, dataloader, optimizer, logger, epoch, Epoches, work_dir, NUM_CLASSES):
    model = model.eval()
    conf_total = np.zeros((NUM_CLASSES, NUM_CLASSES))
    for iter, batch in enumerate(dataloader):
        imgs, pngs, labels = batch
        imgs = Variable(torch.from_numpy(imgs).type(torch.FloatTensor))
        pngs = Variable(torch.from_numpy(pngs).type(torch.FloatTensor)).long()
        labels = Variable(torch.from_numpy(labels).type(torch.FloatTensor))
        imgs = imgs.cuda()
        pngs = pngs.cuda()
        labels = labels.cuda()
        outputs = model(imgs)
        loss = F.cross_entropy(outputs, pngs)
        # loss = CE_Loss(outputs, pngs, num_classes=NUM_CLASSES)
        label = pngs.cpu().numpy().squeeze().flatten()
        prediction = outputs.argmax(1).cpu().numpy().squeeze().flatten()
        conf = confusion_matrix(y_true=label, y_pred=prediction, labels=[0, 1, 2, 3, 4])  # hard-coded for 5 classes; should match NUM_CLASSES
        conf_total += conf
    accuracy, precision, recall, IoU, f1 = compute_results(conf_total)
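The snippet stops right after computing the metrics. A hedged sketch of how they might be reported per epoch is shown below, assuming val is extended to end with return accuracy, precision, recall, IoU, f1, and that model, val_loader, optimizer, logger, epoch, Epoches and work_dir already exist in the surrounding training script; the class names are made up for illustration:

class_names = ["unlabeled", "class1", "class2", "class3", "class4"]  # hypothetical names for the 5 labels
accuracy, precision, recall, IoU, f1 = val(model, val_loader, optimizer, logger,
                                           epoch, Epoches, work_dir, NUM_CLASSES=5)
for name, iou_c, f1_c in zip(class_names, IoU, f1):
    print("%s: IoU=%.4f  F1=%.4f" % (name, iou_c, f1_c))
print("overall accuracy: %.4f" % accuracy)
print("mIoU: %.4f  mean F1: %.4f" % (np.nanmean(IoU), np.nanmean(f1)))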
