Commit 2375dc7

Prediction: Output error
VesnaT authored and markotoplak committed Sep 15, 2023
1 parent 3dd6d9c commit 2375dc7
Showing 2 changed files with 65 additions and 10 deletions.
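The change in brief: the Predictions widget now appends a per-predictor "(error)" meta column to its output whenever errors are shown in the view, and re-sends the output when error visibility changes. The visibility check is factored out into a `shown_errors` property. As a minimal standalone restatement of that property's logic (not the widget code itself; `NO_ERR` is a stand-in for whatever value the widget's regression-error option uses to mean "no error shown"):

    NO_ERR = 0  # assumption: placeholder for the widget's "no error" option

    def shown_errors(class_var, is_discrete_class,
                     show_probability_errors, show_reg_errors):
        # No class variable: never show errors.  With a discrete class the
        # probability-error checkbox decides; with a continuous class any
        # option other than NO_ERR enables the error column.
        return bool(class_var) and (
            show_probability_errors if is_discrete_class
            else show_reg_errors != NO_ERR)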
Orange/widgets/evaluate/owpredictions.py (34 changes: 25 additions & 9 deletions)
@@ -265,6 +265,12 @@ def class_var(self):
     def is_discrete_class(self):
         return bool(self.class_var) and self.class_var.is_discrete
 
+    @property
+    def shown_errors(self):
+        return self.class_var and (
+            self.show_probability_errors if self.is_discrete_class
+            else self.show_reg_errors != NO_ERR)
+
     @Inputs.predictors
     def set_predictor(self, index, predictor: Model):
         item = self.predictors[index]
@@ -331,15 +337,14 @@ def _reg_error_changed(self):
         self._update_prediction_delegate()
 
     def _update_errors_visibility(self):
-        shown = self.class_var and (
-            self.show_probability_errors if self.is_discrete_class
-            else self.show_reg_errors != NO_ERR)
+        shown = self.shown_errors
         view = self.predictionsview
         for col, slot in enumerate(self.predictors):
             view.setColumnHidden(
                 2 * col + 1,
                 not shown or
                 self.is_discrete_class is not slot.predictor.domain.has_discrete_class)
+        self._commit_predictions()
 
     def _set_class_values(self):
         self.class_values = []
Expand Down Expand Up @@ -814,12 +819,12 @@ def _commit_predictions(self):

newmetas = []
newcolumns = []
for slot in self._non_errored_predictors():
for i, slot in enumerate(self._non_errored_predictors()):
target = slot.predictor.domain.class_var
if target and target.is_discrete:
self._add_classification_out_columns(slot, newmetas, newcolumns)
self._add_classification_out_columns(slot, newmetas, newcolumns, i)
else:
self._add_regression_out_columns(slot, newmetas, newcolumns)
self._add_regression_out_columns(slot, newmetas, newcolumns, i)

attrs = list(self.data.domain.attributes)
metas = list(self.data.domain.metas)
Expand Down Expand Up @@ -857,7 +862,7 @@ def _commit_predictions(self):
predictions = predictions[datamodel.mapToSourceRows(...)]
self.Outputs.predictions.send(predictions)

def _add_classification_out_columns(self, slot, newmetas, newcolumns):
def _add_classification_out_columns(self, slot, newmetas, newcolumns, index):
pred = slot.predictor
name = pred.name
values = pred.domain.class_var.values
@@ -877,10 +882,21 @@ def _add_classification_out_columns(slot, newmetas, newcolumns):
         else:
             newcolumns.append(numpy.zeros(probs.shape[0]))
 
-    @staticmethod
-    def _add_regression_out_columns(slot, newmetas, newcolumns):
+        # Column with error
+        self._add_error_out_columns(slot, newmetas, newcolumns, index)
+
+    def _add_regression_out_columns(self, slot, newmetas, newcolumns, index):
         newmetas.append(ContinuousVariable(name=slot.predictor.name))
         newcolumns.append(slot.results.unmapped_predicted)
+        self._add_error_out_columns(slot, newmetas, newcolumns, index)
+
+    def _add_error_out_columns(self, slot, newmetas, newcolumns, index):
+        if self.shown_errors:
+            name = f"{slot.predictor.name} (error)"
+            newmetas.append(ContinuousVariable(name=name))
+            err = self.predictionsview.model().errorColumn(index)
+            err[err == 2] = numpy.nan
+            newcolumns.append(err)
 
     def send_report(self):
         def merge_data_with_predictions():
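The core addition is `_add_error_out_columns`: when errors are shown, it takes the per-row error column that the view's model already computes (`errorColumn(index)`) and emits it as a ContinuousVariable meta named "<predictor name> (error)". A self-contained sketch of the masking step, assuming the model uses the out-of-range value 2 as a sentinel for rows whose target is missing (a probability error cannot exceed 1, and the classification test below expects NaN exactly where the class was unset):

    import numpy

    def error_out_column(predictor_name, err):
        # Hypothetical helper mirroring the masking in _add_error_out_columns.
        err = numpy.asarray(err, dtype=float)
        err[err == 2] = numpy.nan  # sentinel 2 -> missing value in the output
        return f"{predictor_name} (error)", err

    name, column = error_out_column("naive bayes", [0.018, 2.0, 0.113])
    # name == "naive bayes (error)"; column == array([0.018, nan, 0.113])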
Orange/widgets/evaluate/tests/test_owpredictions.py (41 changes: 40 additions & 1 deletion)
@@ -869,6 +869,7 @@ def test_output_wrt_shown_probs_1(self):
         self.send_signal(widget.Inputs.predictors, bayes01, 0)
         self.send_signal(widget.Inputs.predictors, bayes12, 1)
         self.send_signal(widget.Inputs.predictors, bayes012, 2)
+        widget.controls.show_probability_errors.setChecked(False)
 
         for i, pred in enumerate(widget.predictors):
             p = pred.results.unmapped_probabilities
@@ -918,6 +919,7 @@ def test_output_wrt_shown_probs_2(self):
         self.send_signal(widget.Inputs.data, iris012)
         self.send_signal(widget.Inputs.predictors, bayes01, 0)
         self.send_signal(widget.Inputs.predictors, bayes012, 1)
+        widget.controls.show_probability_errors.setChecked(False)
 
         for i, pred in enumerate(widget.predictors):
             p = pred.results.unmapped_probabilities
@@ -968,7 +970,7 @@ def test_output_regression(self):
                          MeanLearner()(self.housing), 1)
         out = self.get_output(widget.Outputs.predictions)
         np.testing.assert_equal(
-            out.metas,
+            out.metas[:, [0, 2]],
             np.hstack([pred.results.predicted.T for pred in widget.predictors]))
 
     def test_classless(self):
@@ -1188,6 +1190,43 @@ def test_migrate_shown_scores(self):
         self.widget.migrate_settings(settings, 1)
         self.assertTrue(settings["score_table"]["show_score_hints"]["Sensitivity"])
 
+    def test_output_error_reg(self):
+        data = self.housing
+        lin_reg = LinearRegressionLearner()
+        self.send_signal(self.widget.Inputs.data, data)
+        self.send_signal(self.widget.Inputs.predictors, lin_reg(data), 0)
+        self.send_signal(self.widget.Inputs.predictors,
+                         LinearRegressionLearner(fit_intercept=False)(data), 1)
+        pred = self.get_output(self.widget.Outputs.predictions)
+
+        names = ["", " (error)"]
+        names = [f"{n}{i}" for i in ("", " (1)") for n in names]
+        names = [f"{lin_reg.name}{x}" for x in names]
+        self.assertEqual(names, [m.name for m in pred.domain.metas])
+        self.assertAlmostEqual(pred.metas[0, 1], 6.0, 1)
+        self.assertAlmostEqual(pred.metas[0, 3], 5.1, 1)
+
+    def test_output_error_cls(self):
+        data = self.iris
+        log_reg = LogisticRegressionLearner()
+        self.send_signal(self.widget.Inputs.predictors, log_reg(data), 0)
+        self.send_signal(self.widget.Inputs.predictors,
+                         LogisticRegressionLearner(penalty="l1")(data), 1)
+        with data.unlocked(data.Y):
+            data.Y[1] = np.nan
+        self.send_signal(self.widget.Inputs.data, data)
+        pred = self.get_output(self.widget.Outputs.predictions)
+
+        names = [""] + [f" ({v})" for v in
+                        list(data.domain.class_var.values) + ["error"]]
+        names = [f"{n}{i}" for i in ("", " (1)") for n in names]
+        names = [f"{log_reg.name}{x}" for x in names]
+        self.assertEqual(names, [m.name for m in pred.domain.metas])
+        self.assertAlmostEqual(pred.metas[0, 4], 0.018, 3)
+        self.assertAlmostEqual(pred.metas[0, 9], 0.113, 3)
+        self.assertTrue(np.isnan(pred.metas[1, 4]))
+        self.assertTrue(np.isnan(pred.metas[1, 9]))
+
 
 class SelectionModelTest(unittest.TestCase):
     def setUp(self):
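The new tests also pin down the output layout: each predictor contributes its prediction column(s) followed by its error column, and Orange's usual " (1)" suffix disambiguates duplicate names. A quick reconstruction of the naming pattern checked by test_output_error_reg (the default learner name "linear regression" is an assumption here):

    lin_reg_name = "linear regression"  # assumption: LinearRegressionLearner().name
    names = ["", " (error)"]
    names = [f"{n}{i}" for i in ("", " (1)") for n in names]
    names = [f"{lin_reg_name}{x}" for x in names]
    print(names)
    # ['linear regression', 'linear regression (error)',
    #  'linear regression (1)', 'linear regression (error) (1)']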
