diff --git a/Orange/classification/sgd.py b/Orange/classification/sgd.py
index 0dce4d34b5e..84739bf0eeb 100644
--- a/Orange/classification/sgd.py
+++ b/Orange/classification/sgd.py
@@ -14,9 +14,9 @@ class SGDClassificationLearner(SklLearner):
     preprocessors = SklLearner.preprocessors + [Normalize()]

     def __init__(self, loss='hinge', penalty='l2', alpha=0.0001,
-                 l1_ratio=0.15,fit_intercept=True, n_iter=5, shuffle=True,
-                 epsilon=0.1, random_state=None, learning_rate='invscaling',
-                 eta0=0.01, power_t=0.25, warm_start=False, average=False,
-                 preprocessors=None):
+                 l1_ratio=0.15, fit_intercept=True, max_iter=5,
+                 tol=None, shuffle=True, epsilon=0.1, random_state=None,
+                 learning_rate='invscaling', eta0=0.01, power_t=0.25,
+                 warm_start=False, average=False, preprocessors=None):
         super().__init__(preprocessors=preprocessors)
         self.params = vars()
diff --git a/Orange/regression/linear.py b/Orange/regression/linear.py
index 0186483ee0b..549782ce3d8 100644
--- a/Orange/regression/linear.py
+++ b/Orange/regression/linear.py
@@ -84,8 +84,8 @@ class SGDRegressionLearner(LinearRegressionLearner):
     preprocessors = SklLearner.preprocessors + [Normalize()]

     def __init__(self, loss='squared_loss', penalty='l2', alpha=0.0001,
-                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
-                 epsilon=0.1, n_jobs=1, random_state=None,
+                 l1_ratio=0.15, fit_intercept=True, max_iter=5, tol=None,
+                 shuffle=True, epsilon=0.1, n_jobs=1, random_state=None,
                  learning_rate='invscaling', eta0=0.01, power_t=0.25,
                  class_weight=None, warm_start=False, average=False,
                  preprocessors=None):
diff --git a/Orange/widgets/model/owsgd.py b/Orange/widgets/model/owsgd.py
index 0f40ee8fbfa..14ddffeaa8a 100644
--- a/Orange/widgets/model/owsgd.py
+++ b/Orange/widgets/model/owsgd.py
@@ -24,6 +24,8 @@ class OWSGD(OWBaseLearner):
     ]
     priority = 90

+    settings_version = 2
+
     LEARNER = SGDLearner

     class Outputs(OWBaseLearner.Outputs):
@@ -76,7 +78,9 @@ class Outputs(OWBaseLearner.Outputs):
     learning_rate_index = Setting(0)
     eta0 = Setting(.01)
     power_t = Setting(.25)
-    n_iter = Setting(5)
+    max_iter = Setting(1000)
+    tol = Setting(1e-3)
+    tol_enabled = Setting(True)

     def add_main_layout(self):
         self._add_algorithm_to_layout()
@@ -150,10 +154,17 @@ def _add_learning_params_to_layout(self):
             callback=self.settings_changed)
         gui.separator(box, height=12)

-        self.n_iter_spin = gui.spin(
-            box, self, 'n_iter', 1, MAXINT - 1, label='Number of iterations: ',
+        self.max_iter_spin = gui.spin(
+            box, self, 'max_iter', 1, MAXINT - 1, label='Number of iterations: ',
             controlWidth=80, alignment=Qt.AlignRight,
             callback=self.settings_changed)
+
+        self.tol_spin = gui.spin(
+            box, self, 'tol', 0, 10., .1e-3, spinType=float, controlWidth=80,
+            label='Tolerance (stopping criterion): ', checked='tol_enabled',
+            alignment=Qt.AlignRight, callback=self.settings_changed)
+        gui.separator(box, height=12)
+
         # Wrap shuffle_cbx inside another hbox to align it with the random_seed
         # spin box on OSX
         self.shuffle_cbx = gui.checkBox(
@@ -246,7 +257,8 @@ def create_learner(self):
             learning_rate=self.learning_rates[self.learning_rate_index][1],
             eta0=self.eta0,
             power_t=self.power_t,
-            n_iter=self.n_iter,
+            max_iter=self.max_iter,
+            tol=self.tol if self.tol_enabled else None,
             preprocessors=self.preprocessors,
             **params)

@@ -306,6 +318,12 @@ def update_model(self):
             coeffs.name = "coefficients"
         self.Outputs.coefficients.send(coeffs)

+    @classmethod
+    def migrate_settings(cls, settings_, version):
+        if version < 2:
+            settings_["max_iter"] = settings_.pop("n_iter", 5)
+            settings_["tol_enabled"] = False
+

 if __name__ == '__main__':
     import sys