From 2ccc93f0c69b5b0fc8f603ee06d0d615a88a3cf6 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Tue, 6 Jun 2017 17:20:46 +0900 Subject: [PATCH 01/16] use semantic_segmentation_evaluator inside train --- examples/segnet/train.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/examples/segnet/train.py b/examples/segnet/train.py index 80f7229acc..ced9cbe3d3 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -14,22 +14,14 @@ from chainer import training from chainer.training import extensions +from chainercv.datasets import camvid_label_names from chainercv.datasets import CamVidDataset from chainercv.datasets import TransformDataset +from chainercv.extensions import SemanticSegmentationEvaluator from chainercv.links import PixelwiseSoftmaxClassifier from chainercv.links import SegNetBasic -class TestModeEvaluator(extensions.Evaluator): - - def evaluate(self): - model = self.get_target('main') - model.train = False - ret = super(TestModeEvaluator, self).evaluate() - model.train = True - return ret - - def main(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', type=int, default=-1) @@ -84,24 +76,32 @@ def transform(in_data): trainer.extend(extensions.LogReport(trigger=log_trigger)) trainer.extend(extensions.observe_lr(), trigger=log_trigger) trainer.extend(extensions.dump_graph('main/loss')) - trainer.extend(TestModeEvaluator(val_iter, model, - device=args.gpu), - trigger=validation_trigger) if extensions.PlotReport.available(): trainer.extend(extensions.PlotReport( - ['main/loss', 'validation/main/loss'], x_key='iteration', + ['main/loss'], x_key='iteration', file_name='loss.png')) + trainer.extend(extensions.PlotReport( + ['validation/main/miou'], x_key='iteration', + file_name='miou.png')) trainer.extend(extensions.snapshot_object( model.predictor, filename='model_iteration-{.updater.iteration}', trigger=end_trigger)) trainer.extend(extensions.PrintReport( ['epoch', 'iteration', 'elapsed_time', 'lr', - 'main/loss', 'validation/main/loss']), + 'main/loss', 'validation/main/miou']), trigger=log_trigger) trainer.extend(extensions.ProgressBar(update_interval=10)) + trainer.extend( + SemanticSegmentationEvaluator( + val_iter, + model.predictor, + len(camvid_label_names), + label_names=camvid_label_names), + trigger=validation_trigger) + trainer.run() From 1b0348a9b65fabfb5d45534cfde25c504890a56f Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Tue, 6 Jun 2017 18:01:41 +0900 Subject: [PATCH 02/16] change semantic_segmentation_evaluator to be consistent with other evaluators --- .../semantic_segmentation_evaluator.py | 59 +++++++++++-------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py index 8b872669ab..12fe09c279 100644 --- a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py @@ -13,20 +13,33 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): """An extension that evaluates a semantic segmentation model. This extension iterates over an iterator and evaluates the prediction - results of the model by PASCAL VOC's mAP metrics. + results of the model by Intersection over Union (IoU) for each class and + the mean of the IoUs (mIoU). + This extension reports the following values with keys. 
+ Please note that :obj:`'iou/'` is reported only if + :obj:`label_names` is specified. + + * :obj:`'miou'`: Mean of IoUs (mIoU). + * :obj:`'iou/'`: IoU for class \ + :obj:`label_names[l]`, where :math:`l` is the index of the class. \ + For example, this evaluator reports :obj:`'iou/Sky'`, \ + :obj:`'ap/Building'`, etc. if :obj:`label_names` is \ + :obj:`~chainercv.datasets.camvid_label_names`. \ + If there is no label assigned to class :obj:`label_names[l]` \ + in either ground truth or prediction, it reports :obj:`numpy.nan` as \ + its IoU. \ + In this case, IoU is computed without this class. Args: iterator (chainer.Iterator): An iterator. Each sample should be - following tuple :obj:`img, bbox, label` or - :obj:`img, bbox, label, difficult`. - :obj:`img` is an image, :obj:`bbox` is coordinates of bounding - boxes, :obj:`label` is labels of the bounding boxes and - :obj:`difficult` is whether the bounding boxes are difficult or - not. If :obj:`difficult` is returned, difficult ground truth - will be ignored from evaluation. - target (chainer.Link): An detection link. This link must have - :meth:`predict` method which takes a list of images and returns - :obj:`bboxes`, :obj:`labels` and :obj:`scores`. + following tuple :obj:`img, label`. + :obj:`img` is an image, :obj:`label` is pixel-wise label. + target (chainer.Link): A semantic segmentation link. This link should + have :meth:`predict` method which takes a list of images and + returns :obj:`labels`. + label_names (iterable of strings): An iterable of names of classes. + If this value is specified, IoU for each class is + also reported with the key :obj:`'iou/'`. """ @@ -34,16 +47,10 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): default_name = 'validation' priority = chainer.training.PRIORITY_WRITER - def __init__(self, iterator, target, n_class, label_names=None): + def __init__(self, iterator, target, label_names=None): super(SemanticSegmentationEvaluator, self).__init__( iterator, target) - self.n_class = n_class - if label_names is not None and len(label_names) != n_class: - raise ValueError('The number of classes and the length of' - 'label_names should be same.') - if label_names is None: - label_names = tuple(range(n_class)) self.label_names = label_names def evaluate(self): @@ -64,12 +71,18 @@ def evaluate(self): pred_labels, = pred_values gt_labels, = gt_values - ious = eval_semantic_segmentation_iou( - pred_labels, gt_labels, self.n_class) + iou = eval_semantic_segmentation_iou(pred_labels, gt_labels) + + report = {'miou': np.nanmean(iou)} + + if self.label_names is not None: + for l, label_name in enumerate(self.label_names): + try: + report['iou/{:s}'.format(label_name)] = iou[l] + except IndexError: + report['iou/{:s}'.format(label_name)] = np.nan observation = {} with reporter.report_scope(observation): - for label_name, iou in zip(self.label_names, ious): - reporter.report({'{}/iou'.format(label_name): iou}, target) - reporter.report({'miou': np.nanmean(ious)}, target) + reporter.report(report, target) return observation From 9506e19604c0bf60c72a60be69f638e130a0bbcc Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Tue, 6 Jun 2017 18:17:43 +0900 Subject: [PATCH 03/16] follow style of detection_voc_evaluator --- .../semantic_segmentation_evaluator.py | 2 +- .../test_semantic_segmentation_evaluator.py | 28 +++++++++++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py 
b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py index 12fe09c279..8126dde91c 100644 --- a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py @@ -26,7 +26,7 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): :obj:`'ap/Building'`, etc. if :obj:`label_names` is \ :obj:`~chainercv.datasets.camvid_label_names`. \ If there is no label assigned to class :obj:`label_names[l]` \ - in either ground truth or prediction, it reports :obj:`numpy.nan` as \ + in ground truth, it reports :obj:`numpy.nan` as \ its IoU. \ In this case, IoU is computed without this class. diff --git a/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py b/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py index 472c9637b5..7f49fb70f8 100644 --- a/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py +++ b/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py @@ -27,17 +27,17 @@ def predict(self, imgs): class TestSemanticSegmentationEvaluator(unittest.TestCase): def setUp(self): - n_class = 3 self.label_names = ('a', 'b', 'c') imgs = np.random.uniform(size=(10, 3, 5, 5)) + # There are labels for 'a' and 'b', but none for 'c'. labels = np.random.randint( - low=0, high=n_class, size=(10, 5, 5), dtype=np.int32) + low=0, high=2, size=(10, 5, 5), dtype=np.int32) self.dataset = TupleDataset(imgs, labels) self.link = _SemanticSegmentationStubLink(labels) self.iterator = SerialIterator( self.dataset, 5, repeat=False, shuffle=False) self.evaluator = SemanticSegmentationEvaluator( - self.iterator, self.link, n_class, self.label_names) + self.iterator, self.link, self.label_names) def test_evaluate(self): reporter = chainer.Reporter() @@ -47,30 +47,36 @@ def test_evaluate(self): # No observation is reported to the current reporter. Instead the # evaluator collect results in order to calculate their mean. - self.assertEqual(len(reporter.observation), 0) + np.testing.assert_equal(len(reporter.observation), 0) - self.assertEqual(eval_['target/miou'], 1.) - for label_name in self.label_names: - self.assertEqual(eval_['target/{}/iou'.format(label_name)], 1) + np.testing.assert_equal(eval_['target/miou'], 1.) + np.testing.assert_equal(eval_['target/iou/a'], 1.) + np.testing.assert_equal(eval_['target/iou/b'], 1.) + np.testing.assert_equal(eval_['target/iou/c'], np.nan) def test_call(self): eval_ = self.evaluator() # main is used as default - self.assertEqual(eval_['main/miou'], 1) + np.testing.assert_equal(eval_['main/miou'], 1) + np.testing.assert_equal(eval_['main/iou/a'], 1.) + np.testing.assert_equal(eval_['main/iou/b'], 1.) + np.testing.assert_equal(eval_['main/iou/c'], np.nan) def test_evaluator_name(self): self.evaluator.name = 'eval' eval_ = self.evaluator() # name is used as a prefix - self.assertAlmostEqual( - eval_['eval/main/miou'], 1) + self.assertAlmostEqual(eval_['eval/main/miou'], 1) + np.testing.assert_equal(eval_['eval/main/iou/a'], 1.) + np.testing.assert_equal(eval_['eval/main/iou/b'], 1.) + np.testing.assert_equal(eval_['eval/main/iou/c'], np.nan) def test_current_report(self): reporter = chainer.Reporter() with reporter: eval_ = self.evaluator() # The result is reported to the current reporter. 
- self.assertEqual(reporter.observation, eval_) + np.testing.assert_equal(reporter.observation, eval_) testing.run_module(__name__, __file__) From b68b2e8e47fb181b6ba19f612221a4e02cba8aa3 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Wed, 14 Jun 2017 09:49:56 +0900 Subject: [PATCH 04/16] fix __init__.py --- chainercv/extensions/__init__.py | 1 + chainercv/extensions/semantic_segmentation/__init__.py | 0 2 files changed, 1 insertion(+) create mode 100644 chainercv/extensions/semantic_segmentation/__init__.py diff --git a/chainercv/extensions/__init__.py b/chainercv/extensions/__init__.py index 2abdb09a67..de25de6544 100644 --- a/chainercv/extensions/__init__.py +++ b/chainercv/extensions/__init__.py @@ -1,2 +1,3 @@ from chainercv.extensions.detection.detection_vis_report import DetectionVisReport # NOQA from chainercv.extensions.detection.detection_voc_evaluator import DetectionVOCEvaluator # NOQA +from chainercv.extensions.semantic_segmentation.semantic_segmentation_evaluator import SemanticSegmentationEvaluator # NOQA diff --git a/chainercv/extensions/semantic_segmentation/__init__.py b/chainercv/extensions/semantic_segmentation/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 3b298b294b0ad08f0c660ea261a0878643656aa4 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Wed, 14 Jun 2017 09:53:15 +0900 Subject: [PATCH 05/16] cosmetic --- .../semantic_segmentation/semantic_segmentation_evaluator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py index 8126dde91c..af48a86ac5 100644 --- a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py @@ -50,7 +50,6 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): def __init__(self, iterator, target, label_names=None): super(SemanticSegmentationEvaluator, self).__init__( iterator, target) - self.label_names = label_names def evaluate(self): From 2def1dcd0c52cd373cf473f8beadb54d2a1d64df Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Wed, 14 Jun 2017 09:57:25 +0900 Subject: [PATCH 06/16] fix train.py --- examples/segnet/train.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/examples/segnet/train.py b/examples/segnet/train.py index ced9cbe3d3..1ac70edd78 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -96,10 +96,8 @@ def transform(in_data): trainer.extend( SemanticSegmentationEvaluator( - val_iter, - model.predictor, - len(camvid_label_names), - label_names=camvid_label_names), + val_iter, model, + camvid_label_names), trigger=validation_trigger) trainer.run() From 3fd76c1ec0d03d2480daf5555d1273645e7ff6e4 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Wed, 14 Jun 2017 12:37:03 +0900 Subject: [PATCH 07/16] fix train.py --- examples/segnet/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/segnet/train.py b/examples/segnet/train.py index 32a154f02d..4f79818ff8 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -96,7 +96,7 @@ def transform(in_data): trainer.extend( SemanticSegmentationEvaluator( - val_iter, model, + val_iter, model.predictor, camvid_label_names), trigger=validation_trigger) From e5954e95c907143eeac52db2d83695ca3ae853ad Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Wed, 21 Jun 2017 22:57:36 +0900 Subject: [PATCH 
08/16] use new evaluation API --- .../semantic_segmentation_evaluator.py | 13 +++++++++---- examples/segnet/train.py | 4 +++- .../test_semantic_segmentation_evaluator.py | 19 +++++++++++++++++-- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py index af48a86ac5..73f3557f12 100644 --- a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py @@ -4,7 +4,7 @@ from chainer import reporter import chainer.training.extensions -from chainercv.evaluations import eval_semantic_segmentation_iou +from chainercv.evaluations import eval_semantic_segmentation from chainercv.utils import apply_prediction_to_iterator @@ -70,16 +70,21 @@ def evaluate(self): pred_labels, = pred_values gt_labels, = gt_values - iou = eval_semantic_segmentation_iou(pred_labels, gt_labels) + result = eval_semantic_segmentation(pred_labels, gt_labels) - report = {'miou': np.nanmean(iou)} + report = {'miou': result['miou'], + 'pixel_accuracy': result['pixel_accuracy'], + 'mean_class_accuracy': result['mean_class_accuracy']} if self.label_names is not None: for l, label_name in enumerate(self.label_names): try: - report['iou/{:s}'.format(label_name)] = iou[l] + report['iou/{:s}'.format(label_name)] = result['iou'][l] + report['class_accuracy/{:s}'.format(label_name)] =\ + result['class_accuracy'][l] except IndexError: report['iou/{:s}'.format(label_name)] = np.nan + report['class_accuracy/{:s}'.format(label_name)] = np.nan observation = {} with reporter.report_scope(observation): diff --git a/examples/segnet/train.py b/examples/segnet/train.py index 32a154f02d..ad63f3e465 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -90,7 +90,9 @@ def transform(in_data): trigger=end_trigger) trainer.extend(extensions.PrintReport( ['epoch', 'iteration', 'elapsed_time', 'lr', - 'main/loss', 'validation/main/miou']), + 'main/loss', 'validation/main/miou', + 'validation/main/class_accuracy', + 'validation/main/pixel_accuracy']), trigger=log_trigger) trainer.extend(extensions.ProgressBar(update_interval=10)) diff --git a/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py b/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py index 7f49fb70f8..db0af61c21 100644 --- a/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py +++ b/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py @@ -50,26 +50,41 @@ def test_evaluate(self): np.testing.assert_equal(len(reporter.observation), 0) np.testing.assert_equal(eval_['target/miou'], 1.) + np.testing.assert_equal(eval_['target/pixel_accuracy'], 1.) + np.testing.assert_equal(eval_['target/mean_class_accuracy'], 1.) np.testing.assert_equal(eval_['target/iou/a'], 1.) np.testing.assert_equal(eval_['target/iou/b'], 1.) np.testing.assert_equal(eval_['target/iou/c'], np.nan) + np.testing.assert_equal(eval_['target/class_accuracy/a'], 1.) + np.testing.assert_equal(eval_['target/class_accuracy/b'], 1.) + np.testing.assert_equal(eval_['target/class_accuracy/c'], np.nan) def test_call(self): eval_ = self.evaluator() # main is used as default - np.testing.assert_equal(eval_['main/miou'], 1) + np.testing.assert_equal(eval_['main/miou'], 1.) 
+ np.testing.assert_equal(eval_['main/pixel_accuracy'], 1.) + np.testing.assert_equal(eval_['main/mean_class_accuracy'], 1.) np.testing.assert_equal(eval_['main/iou/a'], 1.) np.testing.assert_equal(eval_['main/iou/b'], 1.) np.testing.assert_equal(eval_['main/iou/c'], np.nan) + np.testing.assert_equal(eval_['main/class_accuracy/a'], 1.) + np.testing.assert_equal(eval_['main/class_accuracy/b'], 1.) + np.testing.assert_equal(eval_['main/class_accuracy/c'], np.nan) def test_evaluator_name(self): self.evaluator.name = 'eval' eval_ = self.evaluator() # name is used as a prefix - self.assertAlmostEqual(eval_['eval/main/miou'], 1) + np.testing.assert_equal(eval_['eval/main/miou'], 1.) + np.testing.assert_equal(eval_['eval/main/pixel_accuracy'], 1.) + np.testing.assert_equal(eval_['eval/main/mean_class_accuracy'], 1.) np.testing.assert_equal(eval_['eval/main/iou/a'], 1.) np.testing.assert_equal(eval_['eval/main/iou/b'], 1.) np.testing.assert_equal(eval_['eval/main/iou/c'], np.nan) + np.testing.assert_equal(eval_['eval/main/class_accuracy/a'], 1.) + np.testing.assert_equal(eval_['eval/main/class_accuracy/b'], 1.) + np.testing.assert_equal(eval_['eval/main/class_accuracy/c'], np.nan) def test_current_report(self): reporter = chainer.Reporter() From 7e7cab58204c12e0ed03e339041c93f848928296 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Thu, 22 Jun 2017 10:43:41 +0900 Subject: [PATCH 09/16] fix documentations for semantic segmentation evaluator --- .../semantic_segmentation_evaluator.py | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py index 73f3557f12..ecf76bfed6 100644 --- a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py @@ -13,10 +13,11 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): """An extension that evaluates a semantic segmentation model. This extension iterates over an iterator and evaluates the prediction - results of the model by Intersection over Union (IoU) for each class and - the mean of the IoUs (mIoU). + results of the model by common evaluation metrics for semantic + segmentation. This extension reports the following values with keys. - Please note that :obj:`'iou/'` is reported only if + Please note that :obj:`'iou/'` and + :obj:`'class_accuracy/'` are reported only if :obj:`label_names` is specified. * :obj:`'miou'`: Mean of IoUs (mIoU). @@ -28,7 +29,21 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): If there is no label assigned to class :obj:`label_names[l]` \ in ground truth, it reports :obj:`numpy.nan` as \ its IoU. \ - In this case, IoU is computed without this class. + In this case, mean IoU is computed without this class. + * :obj:`'mean_class_accuracy'`: Mean of class accuracies. + * :obj:`class_accuracy/'`: Class accuracy for class \ + :obj:`label_names[l]`, where :math:`l` is the index of the class. \ + If there is no label assigned to class :obj:`label_names[l]` \ + in ground truth, it reports :obj:`numpy.nam` as \ + its class accuracy. \ + In this case, mean class accuracy is computed without this class. + * :obj:`pixel_accuracy`: Pixel accuracy. + + For details on the evaluation metrics, please see the documentation + for :func:`chainercv.evaluations.eval_semantic_segmentation`. + + .. 
seealso:: + :func:`chainercv.evaluations.eval_semantic_segmentation`. Args: iterator (chainer.Iterator): An iterator. Each sample should be From 002a4b520e84aa848ff9b3a2f45d509db534753c Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Thu, 22 Jun 2017 10:49:13 +0900 Subject: [PATCH 10/16] change directory structure --- chainercv/extensions/__init__.py | 6 +++--- .../{detection => evaluator}/__init__.py | 0 .../detection_voc_evaluator.py | 0 .../semantic_segmentation_evaluator.py | 0 .../__init__.py | 0 .../detection_vis_report.py | 0 docs/source/reference/extensions.rst | 18 +++++++++++++----- .../test_detection_voc_evaluator.py | 0 .../test_semantic_segmentation_evaluator.py | 0 .../test_detection_vis_report.py | 0 10 files changed, 16 insertions(+), 8 deletions(-) rename chainercv/extensions/{detection => evaluator}/__init__.py (100%) rename chainercv/extensions/{detection => evaluator}/detection_voc_evaluator.py (100%) rename chainercv/extensions/{semantic_segmentation => evaluator}/semantic_segmentation_evaluator.py (100%) rename chainercv/extensions/{semantic_segmentation => vis_report}/__init__.py (100%) rename chainercv/extensions/{detection => vis_report}/detection_vis_report.py (100%) rename tests/extensions_tests/{detection_tests => evaluator_tests}/test_detection_voc_evaluator.py (100%) rename tests/extensions_tests/{semantic_segmentation_tests => evaluator_tests}/test_semantic_segmentation_evaluator.py (100%) rename tests/extensions_tests/{detection_tests => vis_report_tests}/test_detection_vis_report.py (100%) diff --git a/chainercv/extensions/__init__.py b/chainercv/extensions/__init__.py index de25de6544..5f864ab80b 100644 --- a/chainercv/extensions/__init__.py +++ b/chainercv/extensions/__init__.py @@ -1,3 +1,3 @@ -from chainercv.extensions.detection.detection_vis_report import DetectionVisReport # NOQA -from chainercv.extensions.detection.detection_voc_evaluator import DetectionVOCEvaluator # NOQA -from chainercv.extensions.semantic_segmentation.semantic_segmentation_evaluator import SemanticSegmentationEvaluator # NOQA +from chainercv.extensions.evaluator.detection_voc_evaluator import DetectionVOCEvaluator # NOQA +from chainercv.extensions.evaluator.semantic_segmentation_evaluator import SemanticSegmentationEvaluator # NOQA +from chainercv.extensions.vis_report.detection_vis_report import DetectionVisReport # NOQA diff --git a/chainercv/extensions/detection/__init__.py b/chainercv/extensions/evaluator/__init__.py similarity index 100% rename from chainercv/extensions/detection/__init__.py rename to chainercv/extensions/evaluator/__init__.py diff --git a/chainercv/extensions/detection/detection_voc_evaluator.py b/chainercv/extensions/evaluator/detection_voc_evaluator.py similarity index 100% rename from chainercv/extensions/detection/detection_voc_evaluator.py rename to chainercv/extensions/evaluator/detection_voc_evaluator.py diff --git a/chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py b/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py similarity index 100% rename from chainercv/extensions/semantic_segmentation/semantic_segmentation_evaluator.py rename to chainercv/extensions/evaluator/semantic_segmentation_evaluator.py diff --git a/chainercv/extensions/semantic_segmentation/__init__.py b/chainercv/extensions/vis_report/__init__.py similarity index 100% rename from chainercv/extensions/semantic_segmentation/__init__.py rename to chainercv/extensions/vis_report/__init__.py diff --git 
a/chainercv/extensions/detection/detection_vis_report.py b/chainercv/extensions/vis_report/detection_vis_report.py similarity index 100% rename from chainercv/extensions/detection/detection_vis_report.py rename to chainercv/extensions/vis_report/detection_vis_report.py diff --git a/docs/source/reference/extensions.rst b/docs/source/reference/extensions.rst index 2189eb1842..96a62ed376 100644 --- a/docs/source/reference/extensions.rst +++ b/docs/source/reference/extensions.rst @@ -4,13 +4,21 @@ Extensions .. module:: chainercv.extensions -Detection +Evaluator --------- -DetectionVisReport -~~~~~~~~~~~~~~~~~~ -.. autofunction:: DetectionVisReport - DetectionVOCEvaluator ~~~~~~~~~~~~~~~~~~~~~ .. autofunction:: DetectionVOCEvaluator + +SemanticSegmentationEvaluator +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: SemanticSegmentationEvaluator + + +Visualization Report +-------------------- + +DetectionVisReport +~~~~~~~~~~~~~~~~~~ +.. autofunction:: DetectionVisReport diff --git a/tests/extensions_tests/detection_tests/test_detection_voc_evaluator.py b/tests/extensions_tests/evaluator_tests/test_detection_voc_evaluator.py similarity index 100% rename from tests/extensions_tests/detection_tests/test_detection_voc_evaluator.py rename to tests/extensions_tests/evaluator_tests/test_detection_voc_evaluator.py diff --git a/tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py b/tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py similarity index 100% rename from tests/extensions_tests/semantic_segmentation_tests/test_semantic_segmentation_evaluator.py rename to tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py diff --git a/tests/extensions_tests/detection_tests/test_detection_vis_report.py b/tests/extensions_tests/vis_report_tests/test_detection_vis_report.py similarity index 100% rename from tests/extensions_tests/detection_tests/test_detection_vis_report.py rename to tests/extensions_tests/vis_report_tests/test_detection_vis_report.py From c04888444af2e6023177d7030b8cb759fbc6183f Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Thu, 22 Jun 2017 10:57:54 +0900 Subject: [PATCH 11/16] fix doc --- .../semantic_segmentation_evaluator.py | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py b/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py index ecf76bfed6..7b036c60de 100644 --- a/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py @@ -15,7 +15,7 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): This extension iterates over an iterator and evaluates the prediction results of the model by common evaluation metrics for semantic segmentation. - This extension reports the following values with keys. + This extension reports values with keys below. Please note that :obj:`'iou/'` and :obj:`'class_accuracy/'` are reported only if :obj:`label_names` is specified. @@ -23,21 +23,21 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): * :obj:`'miou'`: Mean of IoUs (mIoU). * :obj:`'iou/'`: IoU for class \ :obj:`label_names[l]`, where :math:`l` is the index of the class. \ - For example, this evaluator reports :obj:`'iou/Sky'`, \ - :obj:`'ap/Building'`, etc. if :obj:`label_names` is \ - :obj:`~chainercv.datasets.camvid_label_names`. 
\
- If there is no label assigned to class :obj:`label_names[l]` \
- in ground truth, it reports :obj:`numpy.nan` as \
- its IoU. \
- In this case, mean IoU is computed without this class.
+ For example, if :obj:`label_names` is \
+ :obj:`~chainercv.datasets.camvid_label_names`, \
+ this evaluator reports :obj:`'iou/Sky'`, \
+ :obj:`'iou/Building'`, etc.
* :obj:`'mean_class_accuracy'`: Mean of class accuracies.
- * :obj:`class_accuracy/'`: Class accuracy for class \
- :obj:`label_names[l]`, where :math:`l` is the index of the class. \
- If there is no label assigned to class :obj:`label_names[l]` \
- in ground truth, it reports :obj:`numpy.nam` as \
- its class accuracy. \
- In this case, mean class accuracy is computed without this class.
- * :obj:`pixel_accuracy`: Pixel accuracy.
+ * :obj:`'class_accuracy/'`: Class accuracy for class \
+ :obj:`label_names[l]`, where :math:`l` is the index of the class.
+ * :obj:`'pixel_accuracy'`: Pixel accuracy.
+
+ If there is no label assigned to class :obj:`label_names[l]`
+ in the ground truth, values corresponding to keys
+ :obj:`'iou/'` and :obj:`'class_accuracy/'`
+ are :obj:`numpy.nan`.
+ In that case, their means are calculated by excluding them from
+ the calculation.
For details on the evaluation metrics, please see the documentation
for :func:`chainercv.evaluations.eval_semantic_segmentation`.
@@ -53,8 +53,10 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator):
have :meth:`predict` method which takes a list of images and
returns :obj:`labels`.
label_names (iterable of strings): An iterable of names of classes.
- If this value is specified, IoU for each class is
- also reported with the key :obj:`'iou/'`.
+ If this value is specified, IoU and class accuracy for each class
+ is also reported with the keys
+ :obj:`'iou/'` and
+ :obj:`'class_accuracy/'`.
""" From 9fef692fa98b10f13546f57e3a0766766393f7e7 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Thu, 22 Jun 2017 10:59:20 +0900 Subject: [PATCH 12/16] report mean_class_accuracy --- examples/segnet/train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/segnet/train.py b/examples/segnet/train.py index 4f39699b37..9b012df405 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -91,6 +91,7 @@ def transform(in_data): trainer.extend(extensions.PrintReport( ['epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss', 'validation/main/miou', + 'validation/main/mean_class_accuracy', 'validation/main/class_accuracy', 'validation/main/pixel_accuracy']), trigger=log_trigger) From 7273241a6bd23244c3d095507deef14f9ed0abff Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Thu, 22 Jun 2017 14:46:21 +0900 Subject: [PATCH 13/16] fix train --- examples/segnet/train.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/segnet/train.py b/examples/segnet/train.py index 9b012df405..249e473641 100644 --- a/examples/segnet/train.py +++ b/examples/segnet/train.py @@ -92,7 +92,6 @@ def transform(in_data): ['epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss', 'validation/main/miou', 'validation/main/mean_class_accuracy', - 'validation/main/class_accuracy', 'validation/main/pixel_accuracy']), trigger=log_trigger) trainer.extend(extensions.ProgressBar(update_interval=10)) From 41be4703f0b71144673f7bb3f094b84fcf612fe8 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Mon, 3 Jul 2017 18:05:31 +0900 Subject: [PATCH 14/16] autofunctions->autofunction --- docs/source/reference/extensions.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/reference/extensions.rst b/docs/source/reference/extensions.rst index 96a62ed376..066f0fcac1 100644 --- a/docs/source/reference/extensions.rst +++ b/docs/source/reference/extensions.rst @@ -9,11 +9,11 @@ Evaluator DetectionVOCEvaluator ~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: DetectionVOCEvaluator +.. autoclass:: DetectionVOCEvaluator SemanticSegmentationEvaluator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: SemanticSegmentationEvaluator +.. autoclass:: SemanticSegmentationEvaluator Visualization Report @@ -21,4 +21,4 @@ Visualization Report DetectionVisReport ~~~~~~~~~~~~~~~~~~ -.. autofunction:: DetectionVisReport +.. autoclass:: DetectionVisReport From 60d42f71713b6c8289226e542f771d6881334547 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Mon, 3 Jul 2017 18:07:37 +0900 Subject: [PATCH 15/16] fix doc --- chainercv/extensions/evaluator/detection_voc_evaluator.py | 2 +- .../extensions/evaluator/semantic_segmentation_evaluator.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/chainercv/extensions/evaluator/detection_voc_evaluator.py b/chainercv/extensions/evaluator/detection_voc_evaluator.py index b2255d6d40..8a7936f7bb 100644 --- a/chainercv/extensions/evaluator/detection_voc_evaluator.py +++ b/chainercv/extensions/evaluator/detection_voc_evaluator.py @@ -40,7 +40,7 @@ class DetectionVOCEvaluator(chainer.training.extensions.Evaluator): not. If :obj:`difficult` is returned, difficult ground truth will be ignored from evaluation. target (chainer.Link): A detection link. This link must have - :meth:`predict` method which takes a list of images and returns + :meth:`predict` method that takes a list of images and returns :obj:`bboxes`, :obj:`labels` and :obj:`scores`. use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric for calculating average precision. 
The default value is diff --git a/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py b/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py index 7b036c60de..5b39dfa26d 100644 --- a/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py +++ b/chainercv/extensions/evaluator/semantic_segmentation_evaluator.py @@ -50,11 +50,11 @@ class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator): following tuple :obj:`img, label`. :obj:`img` is an image, :obj:`label` is pixel-wise label. target (chainer.Link): A semantic segmentation link. This link should - have :meth:`predict` method which takes a list of images and + have :meth:`predict` method that takes a list of images and returns :obj:`labels`. label_names (iterable of strings): An iterable of names of classes. If this value is specified, IoU and class accuracy for each class - is also reported with the keys + are also reported with the keys :obj:`'iou/'` and :obj:`'class_accuracy/'`. From ac503401f02069ef2ab606ec87d7034e8f7b38b5 Mon Sep 17 00:00:00 2001 From: Yusuke Niitani Date: Mon, 3 Jul 2017 18:33:48 +0900 Subject: [PATCH 16/16] make iou/a and iou/b different --- .../test_semantic_segmentation_evaluator.py | 82 ++++++++++++------- 1 file changed, 53 insertions(+), 29 deletions(-) diff --git a/tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py b/tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py index db0af61c21..b038e43310 100644 --- a/tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py +++ b/tests/extensions_tests/evaluator_tests/test_semantic_segmentation_evaluator.py @@ -1,3 +1,5 @@ +from __future__ import division + import numpy as np import unittest @@ -28,12 +30,22 @@ class TestSemanticSegmentationEvaluator(unittest.TestCase): def setUp(self): self.label_names = ('a', 'b', 'c') - imgs = np.random.uniform(size=(10, 3, 5, 5)) + imgs = np.random.uniform(size=(1, 3, 2, 3)) # There are labels for 'a' and 'b', but none for 'c'. - labels = np.random.randint( - low=0, high=2, size=(10, 5, 5), dtype=np.int32) - self.dataset = TupleDataset(imgs, labels) - self.link = _SemanticSegmentationStubLink(labels) + pred_labels = np.array([[[1, 1, 1], [0, 0, 1]]]) + gt_labels = np.array([[[1, 0, 0], [0, -1, 1]]]) + + self.iou_a = 1 / 3 + self.iou_b = 2 / 4 + self.pixel_accuracy = 3 / 5 + self.class_accuracy_a = 1 / 3 + self.class_accuracy_b = 2 / 2 + self.miou = np.mean((self.iou_a, self.iou_b)) + self.mean_class_accuracy = np.mean( + (self.class_accuracy_a, self.class_accuracy_b)) + + self.dataset = TupleDataset(imgs, gt_labels) + self.link = _SemanticSegmentationStubLink(pred_labels) self.iterator = SerialIterator( self.dataset, 5, repeat=False, shuffle=False) self.evaluator = SemanticSegmentationEvaluator( @@ -41,7 +53,7 @@ def setUp(self): def test_evaluate(self): reporter = chainer.Reporter() - reporter.add_observer('target', self.link) + reporter.add_observer('main', self.link) with reporter: eval_ = self.evaluator.evaluate() @@ -49,41 +61,53 @@ def test_evaluate(self): # evaluator collect results in order to calculate their mean. np.testing.assert_equal(len(reporter.observation), 0) - np.testing.assert_equal(eval_['target/miou'], 1.) - np.testing.assert_equal(eval_['target/pixel_accuracy'], 1.) - np.testing.assert_equal(eval_['target/mean_class_accuracy'], 1.) - np.testing.assert_equal(eval_['target/iou/a'], 1.) - np.testing.assert_equal(eval_['target/iou/b'], 1.) 
- np.testing.assert_equal(eval_['target/iou/c'], np.nan) - np.testing.assert_equal(eval_['target/class_accuracy/a'], 1.) - np.testing.assert_equal(eval_['target/class_accuracy/b'], 1.) - np.testing.assert_equal(eval_['target/class_accuracy/c'], np.nan) + np.testing.assert_equal(eval_['main/miou'], self.miou) + np.testing.assert_equal(eval_['main/pixel_accuracy'], + self.pixel_accuracy) + np.testing.assert_equal(eval_['main/mean_class_accuracy'], + self.mean_class_accuracy) + np.testing.assert_equal(eval_['main/iou/a'], self.iou_a) + np.testing.assert_equal(eval_['main/iou/b'], self.iou_b) + np.testing.assert_equal(eval_['main/iou/c'], np.nan) + np.testing.assert_equal(eval_['main/class_accuracy/a'], + self.class_accuracy_a) + np.testing.assert_equal(eval_['main/class_accuracy/b'], + self.class_accuracy_b) + np.testing.assert_equal(eval_['main/class_accuracy/c'], np.nan) def test_call(self): eval_ = self.evaluator() # main is used as default - np.testing.assert_equal(eval_['main/miou'], 1.) - np.testing.assert_equal(eval_['main/pixel_accuracy'], 1.) - np.testing.assert_equal(eval_['main/mean_class_accuracy'], 1.) - np.testing.assert_equal(eval_['main/iou/a'], 1.) - np.testing.assert_equal(eval_['main/iou/b'], 1.) + np.testing.assert_equal(eval_['main/miou'], self.miou) + np.testing.assert_equal(eval_['main/pixel_accuracy'], + self.pixel_accuracy) + np.testing.assert_equal(eval_['main/mean_class_accuracy'], + self.mean_class_accuracy) + np.testing.assert_equal(eval_['main/iou/a'], self.iou_a) + np.testing.assert_equal(eval_['main/iou/b'], self.iou_b) np.testing.assert_equal(eval_['main/iou/c'], np.nan) - np.testing.assert_equal(eval_['main/class_accuracy/a'], 1.) - np.testing.assert_equal(eval_['main/class_accuracy/b'], 1.) + np.testing.assert_equal(eval_['main/class_accuracy/a'], + self.class_accuracy_a) + np.testing.assert_equal(eval_['main/class_accuracy/b'], + self.class_accuracy_b) np.testing.assert_equal(eval_['main/class_accuracy/c'], np.nan) def test_evaluator_name(self): self.evaluator.name = 'eval' eval_ = self.evaluator() # name is used as a prefix - np.testing.assert_equal(eval_['eval/main/miou'], 1.) - np.testing.assert_equal(eval_['eval/main/pixel_accuracy'], 1.) - np.testing.assert_equal(eval_['eval/main/mean_class_accuracy'], 1.) - np.testing.assert_equal(eval_['eval/main/iou/a'], 1.) - np.testing.assert_equal(eval_['eval/main/iou/b'], 1.) + np.testing.assert_equal(eval_['eval/main/miou'], self.miou) + np.testing.assert_equal(eval_['eval/main/pixel_accuracy'], + self.pixel_accuracy) + np.testing.assert_equal(eval_['eval/main/mean_class_accuracy'], + self.mean_class_accuracy) + np.testing.assert_equal(eval_['eval/main/iou/a'], self.iou_a) + np.testing.assert_equal(eval_['eval/main/iou/b'], self.iou_b) np.testing.assert_equal(eval_['eval/main/iou/c'], np.nan) - np.testing.assert_equal(eval_['eval/main/class_accuracy/a'], 1.) - np.testing.assert_equal(eval_['eval/main/class_accuracy/b'], 1.) + np.testing.assert_equal(eval_['eval/main/class_accuracy/a'], + self.class_accuracy_a) + np.testing.assert_equal(eval_['eval/main/class_accuracy/b'], + self.class_accuracy_b) np.testing.assert_equal(eval_['eval/main/class_accuracy/c'], np.nan) def test_current_report(self):