[ENH] Calibration plot (add performance curves) and a new Calibrated Learner widget #3881

Merged (21 commits) on Jul 12, 2019

Commits
35a6f4b  Calibration plot: Add plots of ca, sens/spec, prec/recall, ppv/npv (janezd, Jun 13, 2019)
2fa1750  Calibration plot: Add threshold line (janezd, Jun 13, 2019)
d47b68b  Calibration plot: Refactor computation of metrics (janezd, Jun 13, 2019)
585feb2  Testing: Keep 2d array of models when splitting Results by models (janezd, Jun 13, 2019)
7b876e6  Test Learners: Store models when there is just one; properly stack them (janezd, Jun 13, 2019)
93b7a72  classification: Add ModelWithThreshold (janezd, Jun 13, 2019)
ff67b49  Calibration plot: Output selected model (janezd, Jun 13, 2019)
a4424fb  Orange.evaluation.performance_curves: Add module for computation of p… (janezd, Jun 16, 2019)
6024897  Calibration plot: Use Orange.evaluation.testing.performance_curves to… (janezd, Jun 16, 2019)
1cfbeec  Calibration plot: Fix selected model output (janezd, Jun 17, 2019)
f742ff9  OWLearnerWidget: Let default name appear as placeholder. This allows … (janezd, Jun 17, 2019)
c5d070d  evaluations.testing: Minor fixes in unit tests (janezd, Jun 17, 2019)
557fa2e  OWTestLearners: Skip inactive signals (e.g. learner widget outputs None) (janezd, Jun 17, 2019)
1a8b013  Calibrated Learner: Add widget (janezd, Jun 17, 2019)
6ac1db1  Calibration plot: Add context settings (janezd, Jun 17, 2019)
2edcb39  OWCalibration Plot: Unit tests and some fixes (janezd, Jun 18, 2019)
2049afa  Calibration plot: Test missing probabilities and single classes (janezd, Jun 19, 2019)
04d05f4  Calibration plot: Minor fixes (janezd, Jun 24, 2019)
6695ee9  Calibrated Learner: Fix report (janezd, Jun 28, 2019)
65c69e2  Calibrated Learner: Add icon (janezd, Jun 28, 2019)
864d7b5  Calibration plot: Nicer report (janezd, Jun 28, 2019)
63 changes: 23 additions & 40 deletions Orange/widgets/evaluate/contexthandlers.py
@@ -1,47 +1,30 @@
+from Orange.data import Variable
 from Orange.widgets import settings
-from Orange.widgets.utils import getdeepattr


 class EvaluationResultsContextHandler(settings.ContextHandler):
-    def __init__(self, targetAttr, selectedAttr):
-        super().__init__()
-        self.targetAttr, self.selectedAttr = targetAttr, selectedAttr
+    """Context handler for evaluation results"""
janezd (Contributor, PR author) commented on this change:

    This is obviously a new context handler with the same name as an old one that nobody used because it probably didn't work and it was weird. ROC Analysis and Lift Curves currently don't have context; they can now have this one.

(A short usage sketch for the new handler follows this file's diff.)

-    #noinspection PyMethodOverriding
-    def match(self, context, cnames, cvalues):
-        return (cnames, cvalues) == (
-            context.classifierNames, context.classValues) and 2
+    def open_context(self, widget, classes, classifier_names):
+        if isinstance(classes, Variable):
+            if classes.is_discrete:
+                classes = classes.values
+            else:
+                classes = None
+        super().open_context(widget, classes, classifier_names)

-    def fast_save(self, widget, name, value):
-        context = widget.current_context
-        if name == self.targetAttr:
-            context.targetClass = value
-        elif name == self.selectedAttr:
-            context.selectedClassifiers = list(value)
+    def new_context(self, classes, classifier_names):
+        context = super().new_context()
+        context.classes = classes
+        context.classifier_names = classifier_names
+        return context

-    def settings_from_widget(self, widget, *args):
-        super().settings_from_widget(widget, *args)
-        context = widget.current_context
-        context.targetClass = getdeepattr(widget, self.targetAttr)
-        context.selectedClassifiers = list(getdeepattr(self.selectedAttr))
-
-    def settings_to_widget(self, widget, *args):
-        super().settings_to_widget(widget, *args)
-        context = widget.current_context
-        if context.targetClass is not None:
-            setattr(widget, self.targetAttr, context.targetClass)
-        if context.selectedClassifiers is not None:
-            setattr(widget, self.selectedAttr, context.selectedClassifiers)
-
-    #noinspection PyMethodOverriding
-    def find_or_create_context(self, widget, results):
-        cnames = [c.name for c in results.classifiers]
-        cvalues = results.classValues
-        context, isNew = super().find_or_create_context(
-            widget, results.classifierNames, results.classValues)
-        if isNew:
-            context.classifierNames = results.classifierNames
-            context.classValues = results.classValues
-            context.selectedClassifiers = None
-            context.targetClass = None
-        return context, isNew
+    def match(self, context, classes, classifier_names):
+        if classifier_names != context.classifier_names:
+            return self.NO_MATCH
+        elif isinstance(classes, Variable) and classes.is_continuous:
+            return (self.PERFECT_MATCH if context.classes is None
+                    else self.NO_MATCH)
+        else:
+            return (self.PERFECT_MATCH if context.classes == classes
+                    else self.NO_MATCH)
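
How a widget is expected to use the new handler, in rough outline: declare it as settingsHandler, mark the per-evaluation settings as ContextSettings, and bracket the results-input handler with closeContext()/openContext(). The sketch below is illustrative only (OWEvaluationSketch and the way classifier names are filled are made up for the example); the real adoption by the Calibration Plot widget is in the owcalibrationplot.py diff that follows, and ROC Analysis and Lift Curve could follow the same pattern, as the comment above suggests.

# Illustrative sketch, not part of the patch; assumes Orange3 is installed.
# OWEvaluationSketch is a hypothetical widget used only for this example.
from Orange.widgets import widget, settings
from Orange.widgets.evaluate.contexthandlers import \
    EvaluationResultsContextHandler


class OWEvaluationSketch(widget.OWWidget):
    name = "Evaluation Sketch"  # hypothetical name

    settingsHandler = EvaluationResultsContextHandler()
    # Remembered separately for each (class values, classifier names) pair
    target_index = settings.ContextSetting(0)
    selected_classifiers = settings.ContextSetting([])

    def set_results(self, results):
        self.closeContext()            # store settings of the previous context
        self.classifier_names = []     # assumed to be refilled from `results`
        if results is not None:
            class_var = results.domain.class_var
            # The handler stores discrete class values (None for non-discrete
            # targets) together with classifier names; a stored context is
            # reused only when both match, otherwise defaults apply.
            self.openContext(class_var, self.classifier_names)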
12 changes: 9 additions & 3 deletions Orange/widgets/evaluate/owcalibrationplot.py
@@ -12,6 +12,8 @@
 from Orange.evaluation import Results
 from Orange.evaluation.performance_curves import Curves
 from Orange.widgets import widget, gui, settings
+from Orange.widgets.evaluate.contexthandlers import \
+    EvaluationResultsContextHandler
 from Orange.widgets.evaluate.utils import \
     check_results_adequacy, results_for_preview
 from Orange.widgets.utils import colorpalette, colorbrewer
@@ -84,9 +86,9 @@ class Information(widget.OWWidget.Information):
             no_out + "select a single model - the widget can output only one")
         non_binary_class = Msg(no_out + "cannot calibrate non-binary classes")

-
-    target_index = settings.Setting(0)
-    selected_classifiers = settings.Setting([])
+    settingsHandler = EvaluationResultsContextHandler()
+    target_index = settings.ContextSetting(0)
+    selected_classifiers = settings.ContextSetting([])
     score = settings.Setting(0)
     output_calibration = settings.Setting(0)
     fold_curves = settings.Setting(False)
@@ -168,6 +170,7 @@ def __init__(self):

     @Inputs.evaluation_results
     def set_results(self, results):
+        self.closeContext()
         self.clear()
         results = check_results_adequacy(results, self.Error, check_nan=False)
         if results is not None and not results.actual.size:
@@ -177,6 +180,9 @@ def set_results(self, results):
         self.results = results
         if self.results is not None:
             self._initialize(results)
+            class_var = self.results.domain.class_var
+            self.target_index = int(len(class_var.values) == 2)
+            self.openContext(class_var, self.classifier_names)
             self._replot()
         self.apply()

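A note on the new default target set above (a reading of the code, not part of the patch text): int(len(class_var.values) == 2) evaluates to 1 for a binary class variable and 0 otherwise, so binary problems preselect the second value, conventionally the positive class, while multiclass problems keep the first. Because this default is assigned before openContext(), a previously stored context for the same classes and classifiers can still override it with the user's last selection.

# Minimal illustration of the default-target expression; the value tuples are made up.
binary_values = ("no", "yes")
multiclass_values = ("setosa", "versicolor", "virginica")

print(int(len(binary_values) == 2))      # 1 -> preselect "yes", the second value
print(int(len(multiclass_values) == 2))  # 0 -> preselect "setosa", the first value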