The 'penalty' parameter of LogisticRegression must be a str among {'l1', 'l2', 'elasticnet'} or None. Got 'none' instead. #265
Hello, your message has been received. Thank you.
The routines defined in plt_overfit.py are:

- `def map_one_feature(X1, degree):`
- `def map_feature(X1, X2, degree):`
- `def munge(base, exp):`
- `def plot_decision_boundary(ax, x0r, x1r, predict, w, b, scaler=False, mu=None, sigma=None, degree=None):`
- `def plot_decision_boundary_sklearn(x0r, x1r, predict, degree, scaler=False):` (use this to test the routine above)

For debugging, uncomment the `#@output` statements on the routines you want error output from. In the notebook that calls these routines, import `overfit_example` and `output` from `plt_overfit` (see the notebook snippet below).
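For context, this is roughly what a `map_feature`-style polynomial expansion does in these labs. This is an illustrative sketch only, not the code shipped in plt_overfit.py; the term ordering and the second return value (a list of term labels, ignored by the caller) are assumptions:

```python
import numpy as np

def map_feature(X1, X2, degree):
    """Map two features to all polynomial terms up to `degree` (sketch).

    Assumed convention: columns X1^(i-j) * X2^j for i = 1..degree, j = 0..i.
    Returns the mapped feature matrix and a list of term labels; the caller
    in logistic_regression() discards the second value.
    """
    X1 = np.atleast_1d(X1)
    X2 = np.atleast_1d(X2)
    cols, labels = [], []
    for i in range(1, degree + 1):
        for j in range(i + 1):
            cols.append((X1 ** (i - j)) * (X2 ** j))
            labels.append(f"x1^{i - j} * x2^{j}")
    return np.stack(cols, axis=1), labels
```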
@hu-minghao @liuyameng128 According to this post ("sklearn LogisticRegression without regularization"), on sklearn 0.21 and higher the regularization can be disabled via the `penalty` parameter. On recent scikit-learn releases the string `'none'` is no longer accepted, so you must pass the `None` object instead. The fixed `logistic_regression` method:

```python
def logistic_regression(self):
    self.ax[0].clear()
    self.fig.canvas.draw()

    # create and fit the model using our mapped_X feature set.
    self.X_mapped, _ = map_feature(self.X[:, 0], self.X[:, 1], self.degree)
    self.X_mapped_scaled, self.X_mu, self.X_sigma = zscore_normalize_features(self.X_mapped)

    if not self.regularize or self.lambda_ == 0:
        lr = LogisticRegression(penalty=None, max_iter=10000)  # line fixed: penalty='none' -> penalty=None
    else:
        C = 1 / self.lambda_
        lr = LogisticRegression(C=C, max_iter=10000)  # keep the default 'l2' penalty so C takes effect
    lr.fit(self.X_mapped_scaled, self.y)
    # print(lr.score(self.X_mapped_scaled, self.y))
    self.w = lr.coef_.reshape(-1,)
    self.b = lr.intercept_
    # print(self.w, self.b)
    self.logistic_data(redraw=True)
    self.contour = plot_decision_boundary(self.ax[0], [-1, 1], [-1, 1], predict_logistic, self.w, self.b,
                                          scaler=True, mu=self.X_mu, sigma=self.X_sigma, degree=self.degree)
    self.fig.canvas.draw()
```
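If you want the same lab code to run across scikit-learn versions, one option is a small version check. A minimal sketch, assuming only that releases before 1.2 expect the string `'none'` while 1.2 and later accept the `None` object:

```python
import sklearn
from sklearn.linear_model import LogisticRegression

# Releases before 1.2 expect penalty='none'; 1.2 and later accept penalty=None.
major, minor = (int(v) for v in sklearn.__version__.split(".")[:2])
no_penalty = None if (major, minor) >= (1, 2) else "none"

lr = LogisticRegression(penalty=no_penalty, max_iter=10000)
```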
```python
%matplotlib widget
import matplotlib.pyplot as plt
from ipywidgets import Output
from plt_overfit import overfit_example, output
plt.style.use('./deeplearning.mplstyle')
plt.close("all")
display(output)
ofit = overfit_example(False)
```
error message:
```
InvalidParameterError                     Traceback (most recent call last)
File D:\Python\Lib\site-packages\ipywidgets\widgets\widget_output.py:103, in Output.capture.<locals>.capture_decorator.<locals>.inner(*args, **kwargs)
    101 self.clear_output(*clear_args, **clear_kwargs)
    102 with self:
--> 103     return func(*args, **kwargs)

File D:\ppt\jupyter数据处理\week3\Optional Labs\plt_overfit.py:323, in overfit_example.fitdata_clicked(self, event)
    320 @output.capture()  # debug
    321 def fitdata_clicked(self, event):
    322     if self.logistic:
--> 323         self.logistic_regression()
    324     else:
    325         self.linear_regression()

File D:\ppt\jupyter数据处理\week3\Optional Labs\plt_overfit.py:365, in overfit_example.logistic_regression(self)
    362     C = 1/self.lambda_
    363     lr = LogisticRegression(C=C, max_iter=10000)
--> 365 lr.fit(self.X_mapped_scaled,self.y)
    366 #print(lr.score(self.X_mapped_scaled, self.y))
    367 self.w = lr.coef_.reshape(-1,)

File D:\Python\Lib\site-packages\sklearn\base.py:1467, in _fit_context.<locals>.decorator.<locals>.wrapper(estimator, *args, **kwargs)
   1462 partial_fit_and_fitted = (
   1463     fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
   1464 )
   1466 if not global_skip_validation and not partial_fit_and_fitted:
-> 1467     estimator._validate_params()
   1469 with config_context(
   1470     skip_parameter_validation=(
   1471         prefer_skip_nested_validation or global_skip_validation
   1472     )
   1473 ):
   1474     return fit_method(estimator, *args, **kwargs)

File D:\Python\Lib\site-packages\sklearn\base.py:666, in BaseEstimator._validate_params(self)
    658 def _validate_params(self):
    659     """Validate types and values of constructor parameters
    660
    661     The expected type and values must be defined in the `_parameter_constraints`
    (...)
    664     accepted constraints.
    665     """
--> 666     validate_parameter_constraints(
    667         self._parameter_constraints,
    668         self.get_params(deep=False),
    669         caller_name=self.__class__.__name__,
    670     )

File D:\Python\Lib\site-packages\sklearn\utils\_param_validation.py:95, in validate_parameter_constraints(parameter_constraints, params, caller_name)
     89 else:
     90     constraints_str = (
     91         f"{', '.join([str(c) for c in constraints[:-1]])} or"
     92         f" {constraints[-1]}"
     93     )
---> 95 raise InvalidParameterError(
     96     f"The {param_name!r} parameter of {caller_name} must be"
     97     f" {constraints_str}. Got {param_val!r} instead."
     98 )

InvalidParameterError: The 'penalty' parameter of LogisticRegression must be a str among {'l1', 'l2', 'elasticnet'} or None. Got 'none' instead.
```
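The error can be reproduced (and the fix verified) outside the widget with just the estimator. A minimal sketch, assuming a recent scikit-learn where the string `'none'` has been removed:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=50, n_features=4, random_state=0)

# Raises InvalidParameterError at fit time on recent scikit-learn:
# the string 'none' is no longer an accepted value for penalty.
try:
    LogisticRegression(penalty='none', max_iter=10000).fit(X, y)
except Exception as e:
    print(type(e).__name__, e)

# Works: pass the None object to disable regularization.
LogisticRegression(penalty=None, max_iter=10000).fit(X, y)
```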