Merge pull request #6282 from janezd/scorer-registration
[ENH] Improve scorer selection in Test and Score and predictions
markotoplak authored Jan 10, 2023
2 parents 3ea1ce8 + 1df2e31 commit 730d256
Showing 7 changed files with 302 additions and 175 deletions.
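
The core of the change: each Score subclass now declares its own long_name, priority and default_visible class attributes, which the registration metaclass picks up, so Test and Score and Predictions can build their score columns from Score.registry instead of a hard-coded list (note the BUILTIN_SCORERS_ORDER import removed from the tests further down). A minimal sketch of how such a registry-driven lookup could work, assuming only the attributes added in this commit; this is an illustration, not the widgets' actual code:

from Orange.evaluation.scoring import Score

def default_score_columns(registry=Score.registry):
    # Order scorers by the new "priority" attribute (lower values sort first,
    # as the built-in values in scoring.py suggest) and keep only those that
    # are visible by default.
    scorers = sorted(registry.values(), key=lambda scorer: scorer.priority)
    return [scorer.name for scorer in scorers if scorer.default_visible]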
34 changes: 34 additions & 0 deletions Orange/evaluation/scoring.py
@@ -37,6 +37,7 @@ def __new__(mcs, name, bases, dict_, **kwargs):
        if not kwargs.get("abstract"):
            # Don't use inherited names, look into dict_
            cls.name = dict_.get("name", name)
            cls.long_name = dict_.get("long_name", cls.name)
            cls.registry[name] = cls
        else:
            cls.registry = {}
@@ -66,6 +67,9 @@ class Score(metaclass=ScoreMetaType):
    name = None
    long_name = None  #: A short user-readable name (e.g. a few words)

    default_visible = True
    priority = 100

    def __new__(cls, results=None, **kwargs):
        self = super().__new__(cls)
        if results is not None:
@@ -136,7 +140,9 @@ def is_compatible(domain: Domain) -> bool:
# pylint: disable=invalid-name
class CA(ClassificationScore):
    __wraps__ = skl_metrics.accuracy_score
    name = "CA"
    long_name = "Classification accuracy"
    priority = 20


class PrecisionRecallFSupport(ClassificationScore):
@@ -185,14 +191,21 @@ def compute_score(self, results, target=None, average='binary'):

class Precision(TargetScore):
    __wraps__ = skl_metrics.precision_score
    name = "Prec"
    long_name = "Precision"
    priority = 40


class Recall(TargetScore):
    __wraps__ = skl_metrics.recall_score
    name = long_name = "Recall"
    priority = 50


class F1(TargetScore):
    __wraps__ = skl_metrics.f1_score
    name = long_name = "F1"
    priority = 30


class AUC(ClassificationScore):
@@ -210,7 +223,9 @@ class AUC(ClassificationScore):
    __wraps__ = skl_metrics.roc_auc_score
    separate_folds = True
    is_binary = True
    name = "AUC"
    long_name = "Area under ROC curve"
    priority = 10

    @staticmethod
    def calculate_weights(results):
@@ -282,6 +297,10 @@ class LogLoss(ClassificationScore):
    """
    __wraps__ = skl_metrics.log_loss
    priority = 120
    name = "LogLoss"
    long_name = "Logistic loss"
    default_visible = False

    def compute_score(self, results, eps=1e-15, normalize=True,
                      sample_weight=None):
@@ -297,6 +316,10 @@ def compute_score(self, results, eps=1e-15, normalize=True,

class Specificity(ClassificationScore):
    is_binary = True
    priority = 110
    name = "Spec"
    long_name = "Specificity"
    default_visible = False

    @staticmethod
    def calculate_weights(results):
@@ -349,29 +372,40 @@ def compute_score(self, results, target=None, average="binary"):

class MSE(RegressionScore):
    __wraps__ = skl_metrics.mean_squared_error
    name = "MSE"
    long_name = "Mean square error"
    priority = 20


class RMSE(RegressionScore):
    name = "RMSE"
    long_name = "Root mean square error"

    def compute_score(self, results):
        return np.sqrt(MSE(results))
    priority = 30


class MAE(RegressionScore):
    __wraps__ = skl_metrics.mean_absolute_error
    name = "MAE"
    long_name = "Mean absolute error"
    priority = 40


# pylint: disable=invalid-name
class R2(RegressionScore):
    __wraps__ = skl_metrics.r2_score
    name = "R2"
    long_name = "Coefficient of determination"
    priority = 50


class CVRMSE(RegressionScore):
    name = "CVRMSE"
    long_name = "Coefficient of variation of the RMSE"
    priority = 110
    default_visible = False

    def compute_score(self, results):
        mean = np.nanmean(results.actual)
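For add-on authors, registration itself is unchanged (subclassing a Score base class is still enough); the new attributes only control the label, ordering, and default visibility of the score column. A hedged sketch of a custom scorer using them, with a made-up name and an arbitrary wrapped sklearn metric:

import sklearn.metrics as skl_metrics
from Orange.evaluation.scoring import ClassificationScore

class BalancedAccuracy(ClassificationScore):       # hypothetical add-on scorer
    __wraps__ = skl_metrics.balanced_accuracy_score
    name = "BalAcc"                  # short column label
    long_name = "Balanced accuracy"  # user-readable name, a few words
    priority = 60                    # sorts after the built-in classification scores
    default_visible = False          # column starts hidden in the score table
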
8 changes: 8 additions & 0 deletions Orange/widgets/evaluate/owpredictions.py
@@ -63,6 +63,8 @@ class OWPredictions(OWWidget):
    description = "Display predictions of models for an input dataset."
    keywords = []

    settings_version = 2

    want_control_area = False

    class Inputs:
@@ -941,6 +943,12 @@ def showEvent(self, event):
        super().showEvent(event)
        QTimer.singleShot(0, self._update_splitter)

    @classmethod
    def migrate_settings(cls, settings, version):
        if version < 2:
            if "score_table" in settings:
                ScoreTable.migrate_to_show_scores_hints(settings["score_table"])


class ItemDelegate(TableDataDelegate):
    def initStyleOption(self, option, index):
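The migration helper, ScoreTable.migrate_to_show_scores_hints, is defined on ScoreTable (in Orange.widgets.evaluate.utils, whose diff is not loaded on this page). Judging from the new tests below, its observable effect is roughly the following; this is an approximation for orientation, not the actual implementation:

def migrate_to_show_scores_hints(score_table_settings):
    # Old settings kept a set of names in "shown_scores"; the new format is a
    # "show_score_hints" mapping from scorer name to a visibility flag.
    shown = score_table_settings.get("shown_scores", set())
    hints = score_table_settings.setdefault("show_score_hints", {})
    for scorer_name in shown:
        hints[scorer_name] = True
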
12 changes: 6 additions & 6 deletions Orange/widgets/evaluate/owtestandscore.py
@@ -150,12 +150,8 @@ class Outputs:
        predictions = Output("Predictions", Table)
        evaluations_results = Output("Evaluation Results", Results)

    settings_version = 3
    settings_version = 4
    buttons_area_orientation = None
    UserAdviceMessages = [
        widget.Message(
            "Click on the table header to select shown columns",
            "click_header")]

    settingsHandler = settings.PerfectDomainContextHandler()
    score_table = settings.SettingProvider(ScoreTable)
@@ -655,7 +651,8 @@ def update_stats_model(self):
                    item.setData(float(stat.value[0]), Qt.DisplayRole)
                else:
                    item.setToolTip(str(stat.exception))
                    if scorer.name in self.score_table.shown_scores:
                    # pylint: disable=unsubscriptable-object
                    if self.score_table.show_score_hints[scorer.__name__]:
                        has_missing_scores = True
                row.append(item)

@@ -899,6 +896,9 @@ def migrate_settings(cls, settings_, version):
            settings_["context_settings"] = [
                c for c in settings_.get("context_settings", ())
                if not hasattr(c, 'classes')]
        if version < 4:
            if "score_table" in settings_:
                ScoreTable.migrate_to_show_scores_hints(settings_["score_table"])

    @Slot(float)
    def setProgressValue(self, value):
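One detail worth noting in the update_stats_model hunk above: the old lookup used the scorer's user-facing name attribute, while the new show_score_hints mapping is keyed by the scorer's class name (scorer.__name__). A toy illustration with a hypothetical scorer class:

from Orange.evaluation.scoring import ClassificationScore

class Sensitivity(ClassificationScore):   # hypothetical scorer, for illustration only
    name = "Sens"                         # short display label
    long_name = "Sensitivity"

print(Sensitivity.__name__)   # "Sensitivity" -- the key used by show_score_hints
print(Sensitivity.name)       # "Sens"        -- the label shown in the table header
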
5 changes: 5 additions & 0 deletions Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -1168,6 +1168,11 @@ def test_report(self):
        widget.send_report()
        self.assertIn(value, widget.report_paragraph.call_args[0][1])

    def test_migrate_shown_scores(self):
        settings = {"score_table": {"shown_scores": {"Sensitivity"}}}
        self.widget.migrate_settings(settings, 1)
        self.assertTrue(settings["score_table"]["show_score_hints"]["Sensitivity"])


class SelectionModelTest(unittest.TestCase):
    def setUp(self):
68 changes: 38 additions & 30 deletions Orange/widgets/evaluate/tests/test_owtestandscore.py
@@ -21,7 +21,6 @@
from Orange.regression import MeanLearner
from Orange.widgets.evaluate.owtestandscore import (
    OWTestAndScore, results_one_vs_rest)
from Orange.widgets.evaluate.utils import BUILTIN_SCORERS_ORDER
from Orange.widgets.settings import (
    ClassValuesContextHandler, PerfectDomainContextHandler)
from Orange.widgets.tests.base import WidgetTest
@@ -154,6 +153,11 @@ def test_migrate_removes_invalid_contexts(self):
        self.widget.migrate_settings(settings, 2)
        self.assertEqual(settings['context_settings'], [context_valid])

    def test_migrate_shown_scores(self):
        settings = {"score_table": {"shown_scores": {"Sensitivity"}}}
        self.widget.migrate_settings(settings, 3)
        self.assertTrue(settings["score_table"]["show_score_hints"]["Sensitivity"])

    def test_memory_error(self):
        """
        Handling memory error.
@@ -225,38 +229,39 @@ def test_addon_scorers(self):
            # These classes are registered, pylint: disable=unused-variable
            class NewScore(Score):
                class_types = (DiscreteVariable, ContinuousVariable)
                name = "new scorer"

                @staticmethod
                def is_compatible(domain: Domain) -> bool:
                    return True

            class NewClassificationScore(ClassificationScore):
                pass
                name = "new classification scorer"
                default_visible = False

            class NewRegressionScore(RegressionScore):
                pass

            builtins = BUILTIN_SCORERS_ORDER
            self.send_signal("Data", Table("iris"))
            scorer_names = [scorer.name for scorer in self.widget.scorers]
            self.assertEqual(
                tuple(scorer_names[:len(builtins[DiscreteVariable])]),
                builtins[DiscreteVariable])
            self.assertIn("NewScore", scorer_names)
            self.assertIn("NewClassificationScore", scorer_names)
            widget = self.create_widget(OWTestAndScore)
            header = widget.score_table.view.horizontalHeader()
            self.send_signal(widget.Inputs.train_data, Table("iris"))
            scorer_names = [scorer.name for scorer in widget.scorers]
            self.assertIn("new scorer", scorer_names)
            self.assertFalse(header.isSectionHidden(3 + scorer_names.index("new scorer")))
            self.assertIn("new classification scorer", scorer_names)
            self.assertTrue(header.isSectionHidden(3 + scorer_names.index("new classification scorer")))
            self.assertNotIn("NewRegressionScore", scorer_names)
            model = widget.score_table.model


            self.send_signal("Data", Table("housing"))
            scorer_names = [scorer.name for scorer in self.widget.scorers]
            self.assertEqual(
                tuple(scorer_names[:len(builtins[ContinuousVariable])]),
                builtins[ContinuousVariable])
            self.assertIn("NewScore", scorer_names)
            self.assertNotIn("NewClassificationScore", scorer_names)
            self.send_signal(widget.Inputs.train_data, Table("housing"))
            scorer_names = [scorer.name for scorer in widget.scorers]
            self.assertIn("new scorer", scorer_names)
            self.assertNotIn("new classification scorer", scorer_names)
            self.assertIn("NewRegressionScore", scorer_names)

            self.send_signal("Data", None)
            self.assertEqual(self.widget.scorers, [])
            self.send_signal(widget.Inputs.train_data, None)
            self.assertEqual(widget.scorers, [])
        finally:
            del Score.registry["NewScore"]  # pylint: disable=no-member
            del Score.registry["NewClassificationScore"]  # pylint: disable=no-member
@@ -320,7 +325,7 @@ def __call__(self, data):
        header = view.horizontalHeader()
        p = header.rect().center()
        # second visible header section (after 'Model')
        _, idx, *_ = (i for i in range(header.count())
        _, _, idx, *_ = (i for i in range(header.count())
                         if not header.isSectionHidden(i))
        p.setX(header.sectionPosition(idx) + 5)
        QTest.mouseClick(header.viewport(), Qt.LeftButton, pos=p)
@@ -718,11 +723,13 @@ def test_copy_to_clipboard(self):
        selection_model = view.selectionModel()
        selection_model.select(model.index(0, 0),
                               selection_model.Select | selection_model.Rows)

        self.widget.copy_to_clipboard()
        clipboard_text = QApplication.clipboard().text()
        # Tests appear to register additional scorers, so we clip the list
        # to what we know to be there and visible
        clipboard_text = "\t".join(clipboard_text.split("\t")[:6]).strip()
        view_text = "\t".join([str(model.data(model.index(0, i)))
                               for i in (0, 3, 4, 5, 6, 7)]) + "\r\n"
                               for i in (0, 3, 4, 5, 6, 7)]).strip()
        self.assertEqual(clipboard_text, view_text)

    def test_multi_target_input(self):
@@ -752,14 +759,15 @@ def compute_score(self, results):
        mock_learner = Mock(spec=Learner, return_value=mock_model)
        mock_learner.name = 'Mockery'

        self.widget.resampling = OWTestAndScore.TestOnTrain
        self.send_signal(self.widget.Inputs.train_data, data)
        self.send_signal(self.widget.Inputs.learner, MajorityLearner(), 0)
        self.send_signal(self.widget.Inputs.learner, mock_learner, 1)
        _ = self.get_output(self.widget.Outputs.evaluations_results, wait=5000)
        self.assertTrue(len(self.widget.scorers) == 1)
        self.assertTrue(NewScorer in self.widget.scorers)
        self.assertTrue(len(self.widget._successful_slots()) == 1)
        widget = self.create_widget(OWTestAndScore)
        widget.resampling = OWTestAndScore.TestOnTrain
        self.send_signal(widget.Inputs.train_data, data)
        self.send_signal(widget.Inputs.learner, MajorityLearner(), 0)
        self.send_signal(widget.Inputs.learner, mock_learner, 1)
        _ = self.get_output(widget.Outputs.evaluations_results, wait=5000)
        self.assertTrue(len(widget.scorers) == 1)
        self.assertTrue(NewScorer in widget.scorers)
        self.assertTrue(len(widget._successful_slots()) == 1)


class TestHelpers(unittest.TestCase):
(Diffs for the remaining two changed files are not loaded on this page.)
