Calibration plot: Add context settings
janezd committed Jun 17, 2019
1 parent dea15bf commit 064c928
Showing 2 changed files with 32 additions and 43 deletions.
63 changes: 23 additions & 40 deletions Orange/widgets/evaluate/contexthandlers.py
@@ -1,47 +1,30 @@
+from Orange.data import Variable
 from Orange.widgets import settings
-from Orange.widgets.utils import getdeepattr


 class EvaluationResultsContextHandler(settings.ContextHandler):
-    def __init__(self, targetAttr, selectedAttr):
-        super().__init__()
-        self.targetAttr, self.selectedAttr = targetAttr, selectedAttr
+    """Context handler for evaluation results"""

-    #noinspection PyMethodOverriding
-    def match(self, context, cnames, cvalues):
-        return (cnames, cvalues) == (
-            context.classifierNames, context.classValues) and 2
+    def open_context(self, widget, classes, classifier_names):
+        if isinstance(classes, Variable):
+            if classes.is_discrete:
+                classes = classes.values
+            else:
+                classes = None
+        super().open_context(widget, classes, classifier_names)

-    def fast_save(self, widget, name, value):
-        context = widget.current_context
-        if name == self.targetAttr:
-            context.targetClass = value
-        elif name == self.selectedAttr:
-            context.selectedClassifiers = list(value)
+    def new_context(self, classes, classifier_names):
+        context = super().new_context()
+        context.classes = classes
+        context.classifier_names = classifier_names
+        return context

-    def settings_from_widget(self, widget, *args):
-        super().settings_from_widget(widget, *args)
-        context = widget.current_context
-        context.targetClass = getdeepattr(widget, self.targetAttr)
-        context.selectedClassifiers = list(getdeepattr(self.selectedAttr))
-
-    def settings_to_widget(self, widget, *args):
-        super().settings_to_widget(widget, *args)
-        context = widget.current_context
-        if context.targetClass is not None:
-            setattr(widget, self.targetAttr, context.targetClass)
-        if context.selectedClassifiers is not None:
-            setattr(widget, self.selectedAttr, context.selectedClassifiers)
-
-    #noinspection PyMethodOverriding
-    def find_or_create_context(self, widget, results):
-        cnames = [c.name for c in results.classifiers]
-        cvalues = results.classValues
-        context, isNew = super().find_or_create_context(
-            widget, results.classifierNames, results.classValues)
-        if isNew:
-            context.classifierNames = results.classifierNames
-            context.classValues = results.classValues
-            context.selectedClassifiers = None
-            context.targetClass = None
-        return context, isNew
+    def match(self, context, classes, classifier_names):
+        if classifier_names != context.classifier_names:
+            return self.NO_MATCH
+        elif isinstance(classes, Variable) and classes.is_continuous:
+            return (self.PERFECT_MATCH if context.classes is None
+                    else self.NO_MATCH)
+        else:
+            return (self.PERFECT_MATCH if context.classes == classes
+                    else self.NO_MATCH)
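
For orientation, a minimal sketch (not part of the commit) of the matching rule the rewritten handler implements: a stored context is reused only when both the classifier names and the class values agree, and a continuous target is recorded with classes=None. The SimpleNamespace objects below merely stand in for previously stored contexts; everything else is taken from the code above.

# Illustrative only: fake stored contexts, real handler from the commit.
from types import SimpleNamespace

from Orange.data import ContinuousVariable
from Orange.widgets.evaluate.contexthandlers import \
    EvaluationResultsContextHandler

handler = EvaluationResultsContextHandler()

# A context remembered for a binary target and two models.
stored = SimpleNamespace(classes=("no", "yes"),
                         classifier_names=["Tree", "Logistic Regression"])
# Same class values and same classifiers: the stored context is reused.
assert handler.match(stored, ("no", "yes"),
                     ["Tree", "Logistic Regression"]) == handler.PERFECT_MATCH
# Different classifiers: no match.
assert handler.match(stored, ("no", "yes"), ["Tree"]) == handler.NO_MATCH

# For a continuous target, open_context stores classes=None, so only the
# classifier names have to match.
stored_reg = SimpleNamespace(classes=None, classifier_names=["Tree"])
assert handler.match(stored_reg, ContinuousVariable("y"),
                     ["Tree"]) == handler.PERFECT_MATCH
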
12 changes: 9 additions & 3 deletions Orange/widgets/evaluate/owcalibrationplot.py
@@ -12,6 +12,8 @@
 from Orange.evaluation import Results
 from Orange.evaluation.performance_curves import Curves
 from Orange.widgets import widget, gui, settings
+from Orange.widgets.evaluate.contexthandlers import \
+    EvaluationResultsContextHandler
 from Orange.widgets.evaluate.utils import \
     check_results_adequacy, results_for_preview
 from Orange.widgets.utils import colorpalette, colorbrewer
@@ -84,9 +86,9 @@ class Information(widget.OWWidget.Information):
             no_out + "select a single model - the widget can output only one")
         non_binary_class = Msg(no_out + "cannot calibrate non-binary classes")

-
-    target_index = settings.Setting(0)
-    selected_classifiers = settings.Setting([])
+    settingsHandler = EvaluationResultsContextHandler()
+    target_index = settings.ContextSetting(0)
+    selected_classifiers = settings.ContextSetting([])
     score = settings.Setting(0)
     output_calibration = settings.Setting(0)
     fold_curves = settings.Setting(False)
@@ -168,6 +170,7 @@ def __init__(self):

     @Inputs.evaluation_results
     def set_results(self, results):
+        self.closeContext()
         self.clear()
         results = check_results_adequacy(results, self.Error, check_nan=False)
         if results is not None and not results.actual.size:
@@ -177,6 +180,9 @@ def set_results(self, results):
         self.results = results
         if self.results is not None:
             self._initialize(results)
+            class_var = self.results.domain.class_var
+            self.target_index = int(len(class_var.values) == 2)
+            self.openContext(class_var, self.classifier_names)
             self._replot()
         self.apply()
