Predictions: Output annotated table #6718

Merged · 2 commits · Feb 7, 2024
Changes from 1 commit
25 changes: 17 additions & 8 deletions Orange/widgets/evaluate/owpredictions.py
@@ -31,6 +31,7 @@
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.widget import OWWidget, Msg, Input, Output, MultiInput
from Orange.widgets.utils.itemmodels import TableModel
+from Orange.widgets.utils.annotated_data import lazy_annotated_table
from Orange.widgets.utils.sql import check_sql_input
from Orange.widgets.utils.state_summary import format_summary_details
from Orange.widgets.utils.colorpalettes import LimitedDiscretePalette
@@ -72,7 +73,9 @@ class Inputs:
        predictors = MultiInput("Predictors", Model, filter_none=True)

    class Outputs:
-        predictions = Output("Predictions", Orange.data.Table)
+        selected_predictions = Output("Selected Predictions", Orange.data.Table,
+                                      default=True, replaces=["Predictions"])
+        annotated = Output("Predictions", Orange.data.Table)
        evaluation_results = Output("Evaluation Results", Results)

    class Warning(OWWidget.Warning):
@@ -814,7 +817,8 @@ def _commit_evaluation_results(self):

    def _commit_predictions(self):
        if not self.data:
-            self.Outputs.predictions.send(None)
+            self.Outputs.selected_predictions.send(None)
+            self.Outputs.annotated.send(None)
            return

        newmetas = []
@@ -855,12 +859,17 @@ def _commit_predictions(self):
            # Reorder rows as they are ordered in view
            shown_rows = datamodel.mapFromSourceRows(rows)
            rows = rows[numpy.argsort(shown_rows)]
-            predictions = predictions[rows]
-        elif datamodel.sortColumn() >= 0 \
-                or predmodel is not None and predmodel.sortColumn() > 0:
-            # No selection: output all, but in the shown order
-            predictions = predictions[datamodel.mapToSourceRows(...)]
-        self.Outputs.predictions.send(predictions)
+            selected = predictions[rows]
+            annotated_data = lazy_annotated_table(predictions, rows)
+        else:
+            if datamodel.sortColumn() >= 0 \
+                    or predmodel is not None and predmodel.sortColumn() > 0:
+                predictions = predictions[datamodel.mapToSourceRows(...)]
+            selected = predictions
+            annotated_data = predictions
Contributor: Annotated data is missing the Selected attribute when no rows are selected.
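A minimal sketch of one possible fix, assuming lazy_annotated_table also accepts an empty selection, so the annotated output keeps the Selected flag column in both branches:

        else:
            if datamodel.sortColumn() >= 0 \
                    or predmodel is not None and predmodel.sortColumn() > 0:
                predictions = predictions[datamodel.mapToSourceRows(...)]
            selected = predictions
            # Hypothetical: annotate with an all-false "Selected" column so the
            # no-selection output has the same domain as the selected case.
            annotated_data = lazy_annotated_table(predictions, [])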

+        self.Outputs.selected_predictions.send(selected)
+        self.Outputs.annotated.send(annotated_data)
+
Contributor: While at it, you can also remove the redundant new line :)


    def _add_classification_out_columns(self, slot, newmetas, newcolumns, index):
        pred = slot.predictor
94 changes: 70 additions & 24 deletions Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -36,6 +36,7 @@
from Orange.evaluation import Results
from Orange.widgets.tests.utils import excepthook_catch, \
    possible_duplicate_table, simulate
+from Orange.widgets.utils.annotated_data import ANNOTATED_DATA_FEATURE_NAME
from Orange.widgets.utils.colorpalettes import LimitedDiscretePalette


@@ -62,7 +63,7 @@ def test_nan_target_input(self):
        yvec = data.get_column(data.domain.class_var)
        self.send_signal(self.widget.Inputs.data, data)
        self.send_signal(self.widget.Inputs.predictors, ConstantLearner()(data), 1)
-        pred = self.get_output(self.widget.Outputs.predictions)
+        pred = self.get_output(self.widget.Outputs.selected_predictions)
        self.assertIsInstance(pred, Table)
        np.testing.assert_array_equal(
            yvec, pred.get_column(data.domain.class_var))
@@ -92,7 +93,7 @@ def test_no_values_target(self):
        test = Table(domain, np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
                     np.full((3, 1), np.nan))
        self.send_signal(self.widget.Inputs.data, test)
-        pred = self.get_output(self.widget.Outputs.predictions)
+        pred = self.get_output(self.widget.Outputs.selected_predictions)
        self.assertEqual(len(pred), len(test))

        results = self.get_output(self.widget.Outputs.evaluation_results)
@@ -145,7 +146,7 @@ def test_no_class_on_test(self):
        no_class = titanic.transform(Domain(titanic.domain.attributes, None))
        self.send_signal(self.widget.Inputs.predictors, majority_titanic, 1)
        self.send_signal(self.widget.Inputs.data, no_class)
-        out = self.get_output(self.widget.Outputs.predictions)
+        out = self.get_output(self.widget.Outputs.selected_predictions)
        np.testing.assert_allclose(out.get_column("constant"), 0)

        predmodel = self.widget.predictionsview.model()
@@ -500,7 +501,7 @@ def test_unique_output_domain(self):
        self.send_signal(self.widget.Inputs.data, data)
        self.send_signal(self.widget.Inputs.predictors, predictor)

-        output = self.get_output(self.widget.Outputs.predictions)
+        output = self.get_output(self.widget.Outputs.selected_predictions)
        self.assertEqual(output.domain.metas[0].name, 'constant (1)')

    def test_select(self):
@@ -515,6 +516,51 @@ def test_select(self):
               for index in self.widget.dataview.selectionModel().selectedIndexes()}
        self.assertEqual(sel, {(1, col) for col in range(5)})

+    def test_selection_output(self):
+        log_reg_iris = LogisticRegressionLearner()(self.iris)
+        self.send_signal(self.widget.Inputs.predictors, log_reg_iris)
+        self.send_signal(self.widget.Inputs.data, self.iris)
+
+        selmodel = self.widget.dataview.selectionModel()
+        pred_model = self.widget.predictionsview.model()
+
+        selmodel.select(self.widget.dataview.model().index(1, 0), QItemSelectionModel.Select)
+        selmodel.select(self.widget.dataview.model().index(3, 0), QItemSelectionModel.Select)
+        output = self.get_output(self.widget.Outputs.selected_predictions)
+        self.assertEqual(len(output), 2)
+        self.assertEqual(output[0], self.iris[1])
+        self.assertEqual(output[1], self.iris[3])
+        output = self.get_output(self.widget.Outputs.annotated)
+        self.assertEqual(len(output), len(self.iris))
+        col = output.get_column(ANNOTATED_DATA_FEATURE_NAME)
+        self.assertEqual(np.sum(col), 2)
+        self.assertEqual(col[1], 1)
+        self.assertEqual(col[3], 1)
+
+        pred_model.sort(0)
+        output = self.get_output(self.widget.Outputs.selected_predictions)
+        self.assertEqual(len(output), 2)
+        self.assertEqual(output[0], self.iris[1])
+        self.assertEqual(output[1], self.iris[3])
+        output = self.get_output(self.widget.Outputs.annotated)
+        self.assertEqual(len(output), len(self.iris))
+        col = output.get_column(ANNOTATED_DATA_FEATURE_NAME)
+        self.assertEqual(np.sum(col), 2)
+        self.assertEqual(col[1], 1)
+        self.assertEqual(col[3], 1)
+
+        pred_model.sort(0, Qt.DescendingOrder)
+        output = self.get_output(self.widget.Outputs.selected_predictions)
+        self.assertEqual(len(output), 2)
+        self.assertEqual(output[0], self.iris[3])
+        self.assertEqual(output[1], self.iris[1])
+        output = self.get_output(self.widget.Outputs.annotated)
+        self.assertEqual(len(output), len(self.iris))
+        col = output.get_column(ANNOTATED_DATA_FEATURE_NAME)
+        self.assertEqual(np.sum(col), 2)
+        self.assertEqual(col[1], 1)
+        self.assertEqual(col[3], 1)

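Per the reviewer's comment above, a natural follow-up check for the no-selection case — a hedged sketch with a hypothetical test name, assuming the annotated output should always carry the Selected flag column:

    def test_annotated_output_no_selection(self):
        # Hypothetical follow-up test: with no rows selected, the annotated
        # output should still contain the "Selected" column, with all zeros.
        log_reg_iris = LogisticRegressionLearner()(self.iris)
        self.send_signal(self.widget.Inputs.predictors, log_reg_iris)
        self.send_signal(self.widget.Inputs.data, self.iris)
        output = self.get_output(self.widget.Outputs.annotated)
        self.assertEqual(len(output), len(self.iris))
        col = output.get_column(ANNOTATED_DATA_FEATURE_NAME)
        self.assertEqual(np.sum(col), 0)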
def test_select_data_first(self):
log_reg_iris = LogisticRegressionLearner()(self.iris)
self.send_signal(self.widget.Inputs.data, self.iris)
@@ -537,7 +583,7 @@ def test_selection_in_setting(self):
               for index in widget.dataview.selectionModel().selectedIndexes()}
        self.assertEqual(sel, {(row, col)
                               for row in [1, 3, 4] for col in range(5)})
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        exp = self.iris[np.array([1, 3, 4])]
        np.testing.assert_equal(out.X, exp.X)

@@ -883,32 +929,32 @@ def test_output_wrt_shown_probs_1(self):

        widget.shown_probs = widget.NO_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 1, 2])

        widget.shown_probs = widget.DATA_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 0, 110, 2, 210, 211])

        widget.shown_probs = widget.MODEL_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 2, 210, 211, 212])

        widget.shown_probs = widget.BOTH_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 2, 210, 211])

        widget.shown_probs = widget.BOTH_PROBS + 1
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 1, 0, 2, 210])

        widget.shown_probs = widget.BOTH_PROBS + 2
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 11, 1, 110, 2, 211])

    def test_output_wrt_shown_probs_2(self):
@@ -933,37 +979,37 @@ def test_output_wrt_shown_probs_2(self):

        widget.shown_probs = widget.NO_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 1])

        widget.shown_probs = widget.DATA_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 0, 1, 110, 111, 112])

        widget.shown_probs = widget.MODEL_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])

        widget.shown_probs = widget.BOTH_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])

        widget.shown_probs = widget.BOTH_PROBS + 1
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 1, 110])

        widget.shown_probs = widget.BOTH_PROBS + 2
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 11, 1, 111])

        widget.shown_probs = widget.BOTH_PROBS + 3
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 0, 1, 112])

    def test_output_regression(self):
@@ -973,7 +1019,7 @@ def test_output_regression(self):
                         LinearRegressionLearner()(self.housing), 0)
        self.send_signal(widget.Inputs.predictors,
                         MeanLearner()(self.housing), 1)
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        np.testing.assert_equal(
            out.metas[:, [0, 2]],
            np.hstack([pred.results.predicted.T for pred in widget.predictors]))
@@ -1001,12 +1047,12 @@ def test_classless(self):

        widget.shown_probs = widget.NO_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 1, 2])

        widget.shown_probs = widget.MODEL_PROBS
        widget._commit_predictions()
-        out = self.get_output(widget.Outputs.predictions)
+        out = self.get_output(widget.Outputs.selected_predictions)
        self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 2, 210, 211, 212])

    @patch("Orange.widgets.evaluate.owpredictions.usable_scorers",
@@ -1047,7 +1093,7 @@ def test_multi_target_input(self):

        self.send_signal(widget.Inputs.data, data)
        self.send_signal(widget.Inputs.predictors, mock_model, 1)
-        pred = self.get_output(widget.Outputs.predictions)
+        pred = self.get_output(widget.Outputs.selected_predictions)
        self.assertIsInstance(pred, Table)

    def test_error_controls_visibility(self):
@@ -1202,7 +1248,7 @@ def test_output_error_reg(self):
        self.send_signal(self.widget.Inputs.predictors, lin_reg(data), 0)
        self.send_signal(self.widget.Inputs.predictors,
                         LinearRegressionLearner(fit_intercept=False)(data), 1)
-        pred = self.get_output(self.widget.Outputs.predictions)
+        pred = self.get_output(self.widget.Outputs.selected_predictions)

        names = ["", " (error)"]
        names = [f"{n}{i}" for i in ("", " (1)") for n in names]
@@ -1220,7 +1266,7 @@ def test_output_error_cls(self):
        with data.unlocked(data.Y):
            data.Y[1] = np.nan
        self.send_signal(self.widget.Inputs.data, data)
-        pred = self.get_output(self.widget.Outputs.predictions)
+        pred = self.get_output(self.widget.Outputs.selected_predictions)

        names = [""] + [f" ({v})" for v in
                        list(data.domain.class_var.values) + ["error"]]
1 change: 1 addition & 0 deletions i18n/si/msgs.jaml
@@ -8940,6 +8940,7 @@ widgets/evaluate/owpredictions.py:
        Data: Podatki
        Predictors: Modeli
    class `Outputs`:
+        Selected Predictions: Izbrane napovedi
        Predictions: Napovedi
        Evaluation Results: Rezultati vrednotenja
    class `Warning`: