Matthieu/42a 234 module 04 ex 08 regularized logistic (#184)
* fix (module04-ex08): add lambda_ to the constructor signature + an Examples section to clarify the expected behavior

* issue #176: take the ternary and the class attribute supported_penalties into account

* increment version

* change indentation (tabs instead of spaces)
madvid authored Mar 11, 2022
1 parent f186e1a commit 4a90a23
Showing 1 changed file with 51 additions and 4 deletions.

module09/en.subject.tex
@@ -1057,12 +1057,16 @@ \section*{Instructions}
     Description:
     My personal logistic regression to classify things.
     """
-    def __init__(self, theta, alpha=0.001, max_iter=1000, penalty='l2'):
-        self.alpha = alpha
+    supported_penalties = ['l2']  # We consider the l2 penalty only; one may want to implement other penalties.
+
+    def __init__(self, theta, alpha=0.001, max_iter=1000, penalty='l2', lambda_=1.0):
+        # Check on type, data type, value ... if necessary
+        self.alpha = alpha
         self.max_iter = max_iter
         self.theta = theta
-        self.penalty=penalty
-        ... Your code ...
+        self.penalty = penalty
+        self.lambda_ = lambda_ if penalty in self.supported_penalties else 0.0
+        #... Your code ...
 
     ... other methods ...
 \end{minted}
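
The constructor above only stores lambda_; for context, the sketch below shows one way the stored lambda_ typically enters the fitting step, through an l2-regularized gradient. It is a minimal illustration, assuming a hypothetical gradient_ method, numpy column-vector inputs, and an intercept column added on the fly; it is not the exercise's reference solution.

\begin{minted}[bgcolor=darcula-back,formatcom=\color{lightgrey},fontsize=\scriptsize]{python}
import numpy as np

# Hypothetical sketch (not the reference solution): one way lambda_ could
# feed into an l2-regularized gradient. Assumes theta is a column vector
# whose first component is the bias, and x has no intercept column yet.
def gradient_(self, x, y):
    m = x.shape[0]
    x_prime = np.hstack((np.ones((m, 1)), x))              # prepend intercept column
    y_hat = 1.0 / (1.0 + np.exp(-(x_prime @ self.theta)))  # sigmoid of the linear term
    theta_prime = self.theta.copy()
    theta_prime[0] = 0.0                                   # the bias term is not penalized
    return (x_prime.T @ (y_hat - y) + self.lambda_ * theta_prime) / m
\end{minted}

Since penalty=None forces lambda_ to 0.0 in the constructor, the same formula reduces to the unregularized gradient in that case.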
@@ -1075,6 +1079,49 @@ \section*{Instructions}
 \end{itemize}
 \end{itemize}
 
+% ================================= %
+\section*{Examples}
+% --------------------------------- %
+\begin{minted}[bgcolor=darcula-back,formatcom=\color{lightgrey},fontsize=\scriptsize]{python}
+import numpy as np
+from my_logistic_regression import MyLogisticRegression as mylogr
+theta = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])
+
+# Example 1:
+model1 = mylogr(theta, lambda_=5.0)
+
+model1.penalty
+# Output
+'l2'
+
+model1.lambda_
+# Output
+5.0
+
+# Example 2:
+model2 = mylogr(theta, penalty=None)
+
+model2.penalty
+# Output
+None
+
+model2.lambda_
+# Output
+0.0
+
+# Example 3:
+model3 = mylogr(theta, penalty=None, lambda_=2.0)
+
+model3.penalty
+# Output
+None
+
+model3.lambda_
+# Output
+0.0
+
+\end{minted}
+
 \hint{
 this is also a great use case for decorators...
 }
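
One possible reading of that hint, sketched below under stated assumptions: keep the loss method unregularized and let a decorator add the l2 penalty term, so the regularization logic lives in one place. The decorator name l2_regularized and the wrapped method's (self, x, y) signature are illustrative assumptions, not part of the subject.

\begin{minted}[bgcolor=darcula-back,formatcom=\color{lightgrey},fontsize=\scriptsize]{python}
import numpy as np
from functools import wraps

# Hypothetical sketch of the decorator idea -- names are illustrative only.
def l2_regularized(loss_method):
    """Add the l2 penalty term to a plain loss method when the penalty is enabled."""
    @wraps(loss_method)
    def wrapper(self, x, y):
        loss = loss_method(self, x, y)
        if self.penalty in self.supported_penalties:
            theta_prime = self.theta.copy()
            theta_prime[0] = 0.0  # the bias term is not penalized
            loss += (self.lambda_ / (2 * y.shape[0])) * np.sum(theta_prime ** 2)
        return loss
    return wrapper
\end{minted}

A gradient method could be wrapped the same way, so supporting another penalty later means writing a new decorator rather than touching the core methods.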
