
Commit 966e903

Pep8 compliant (almost)
1 parent 1589fb4 commit 966e903

15 files changed: 145 additions & 120 deletions

dirichletcal/__init__.py

Lines changed: 10 additions & 8 deletions
@@ -20,7 +20,6 @@ def __init__(self, matrix_type='full', l2=0.0, comp_l2=False,
         self.comp_l2 = comp_l2
         self.initializer = initializer
 
-
     def __setup(self):
         if isinstance(self.l2, list):
             self.l2_grid = self.l2
@@ -32,16 +31,15 @@ def __setup(self):
             self.comp_l2 = [self.comp_l2]
         self.calibrator_ = None
 
-
    def fit(self, x, y, x_val=None, y_val=None, **kwargs):
        self.__setup()
 
        if self.matrix_type == 'diagonal':
            self.calibrator_ = DiagonalDirichletCalibrator(
-                l2=self.l2, initializer=self.initializer)
+                reg_lambda=self.l2, initializer=self.initializer)
        elif self.matrix_type == 'full':
            self.calibrator_ = FullDirichletCalibrator(
-                reg_lambda_list=self.l2_grid, reg_mu_list=self.comp_l2,
+                reg_lambda=self.l2, reg_mu=self.comp_l2,
                initializer=self.initializer)
        else:
            raise ValueError
@@ -62,25 +60,29 @@ def fit(self, x, y, x_val=None, y_val=None, **kwargs):
 
    @property
    def l2_(self):
-        if (self.calibrator_ is not None) and (hasattr(self.calibrator_, 'l2')):
+        if ((self.calibrator_ is not None) and (hasattr(self.calibrator_,
+                                                         'l2'))):
            return self.calibrator_.l2
        return None
 
    @property
    def weights_(self):
-        if (self.calibrator_ is not None) and (hasattr(self.calibrator_, 'weights_')):
+        if ((self.calibrator_ is not None) and (hasattr(self.calibrator_,
+                                                         'weights_'))):
            return self.calibrator_.weights_
        return None
 
    @property
    def coef_(self):
-        if (self.calibrator_ is not None) and (hasattr(self.calibrator_, 'coef_')):
+        if ((self.calibrator_ is not None) and (hasattr(self.calibrator_,
+                                                         'coef_'))):
            return self.calibrator_.coef_
        return None
 
    @property
    def intercept_(self):
-        if (self.calibrator_ is not None) and (hasattr(self.calibrator_, 'intercept_')):
+        if (self.calibrator_ is not None) and (hasattr(self.calibrator_,
+                                                        'intercept_')):
            return self.calibrator_.intercept_
        return None
 
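
For context, a minimal usage sketch of the wrapper after this change. The class name DirichletCalibrator and the top-level import are assumptions (the hunks only show its methods); the matrix_type / l2 arguments and the coef_ / intercept_ properties come from the diff above, and the data is synthetic:

    # Hypothetical usage; DirichletCalibrator is assumed to be the class
    # defined in dirichletcal/__init__.py whose methods are patched above.
    import numpy as np
    from dirichletcal import DirichletCalibrator

    probs = np.random.dirichlet([2, 3, 5], size=300)   # fake classifier outputs
    labels = np.random.randint(0, 3, size=300)

    cal = DirichletCalibrator(matrix_type='full', l2=1e-3)
    cal.fit(probs, labels)   # l2 is now forwarded as reg_lambda to FullDirichletCalibrator
    print(cal.coef_, cal.intercept_)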

Lines changed: 20 additions & 4 deletions
@@ -1,13 +1,29 @@
 import numpy as np
-
 from .multinomial import MultinomialRegression
 from .fulldirichlet import FullDirichletCalibrator
 from ..utils import clip_for_log
+from sklearn.metrics import log_loss
 
 
 class DiagonalDirichletCalibrator(FullDirichletCalibrator):
+    def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
+
+        self.weights_ = self.weights_init
+
+        if X_val is None:
+            X_val = X.copy()
+            y_val = y.copy()
+
+        _X = np.copy(X)
+        _X = np.log(clip_for_log(_X))
+        _X_val = np.copy(X_val)
+        _X_val = np.log(clip_for_log(X_val))
+
+        self.calibrator_ = MultinomialRegression(
+            method='Diag', reg_lambda=self.reg_lambda, reg_mu=self.reg_mu,
+            reg_norm=self.reg_norm, ref_row=self.ref_row,
+            optimizer=self.optimizer)
+        self.calibrator_.fit(_X, y, *args, **kwargs)
+        self.final_loss = log_loss(y_val, self.calibrator_.predict_proba(_X_val))
 
-    def fit(self, X, y, *args, **kwargs):
-        X_ = np.log(clip_for_log(X))
-        self.calibrator_ = MultinomialRegression(method='Diag', l2=self.l2).fit(X_, y, *args, **kwargs)
        return self
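
The new fit body follows the same preprocessing pattern as the other calibrators: probabilities are clipped and log-transformed before being handed to MultinomialRegression. A rough stand-in for that transform (the exact bounds used by clip_for_log are not visible in this diff, so the epsilon here is an assumption):

    import numpy as np

    def clip_for_log_sketch(probs, eps=1e-16):
        # Assumed behaviour: keep values away from 0 (and 1) so np.log stays finite.
        return np.clip(probs, eps, 1 - eps)

    probs = np.array([[0.0, 0.3, 0.7],
                      [0.2, 0.8, 0.0]])
    log_probs = np.log(clip_for_log_sketch(probs))   # what MultinomialRegression is fit on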

dirichletcal/calib/fulldirichlet.py

Lines changed: 6 additions & 13 deletions
@@ -1,13 +1,10 @@
-import logging
 from sklearn.base import BaseEstimator, RegressorMixin
 
 import numpy as np
 from .multinomial import MultinomialRegression
 from ..utils import clip_for_log
 from sklearn.metrics import log_loss
 
-from .multinomial import _get_identity_weights
-
 
 class FullDirichletCalibrator(BaseEstimator, RegressorMixin):
     def __init__(self, reg_lambda=0.0, reg_mu=None, weights_init=None,
@@ -28,7 +25,7 @@ def __init__(self, reg_lambda=0.0, reg_mu=None, weights_init=None,
         implements a quasi Newton method
         """
         self.reg_lambda = reg_lambda
-        self.reg_mu = reg_mu  # Complementary L2 regularization. (Off-diagonal regularization)
+        self.reg_mu = reg_mu  # Complementary L2 regularization. (Off-diagonal)
         self.weights_init = weights_init  # Input weights for initialisation
         self.initializer = initializer
         self.reg_norm = reg_norm
@@ -39,8 +36,6 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
 
         self.weights_ = self.weights_init
 
-        k = np.shape(X)[1]
-
         if X_val is None:
             X_val = X.copy()
             y_val = y.copy()
@@ -50,14 +45,12 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
         _X_val = np.copy(X_val)
         _X_val = np.log(clip_for_log(X_val))
 
-        self.calibrator_ = MultinomialRegression(method='Full',
-                                                 reg_lambda=self.reg_lambda,
-                                                 reg_mu=self.reg_mu,
-                                                 reg_norm=self.reg_norm,
-                                                 ref_row=self.ref_row,
-                                                 optimizer=self.optimizer)
+        self.calibrator_ = MultinomialRegression(
+            method='Full', reg_lambda=self.reg_lambda, reg_mu=self.reg_mu,
+            reg_norm=self.reg_norm, ref_row=self.ref_row,
+            optimizer=self.optimizer)
         self.calibrator_.fit(_X, y, *args, **kwargs)
-        final_loss = log_loss(y_val, self.calibrator_.predict_proba(_X_val))
+        self.final_loss = log_loss(y_val, self.calibrator_.predict_proba(_X_val))
 
         return self
 
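
One behavioural change worth noting: the validation log loss is now stored on the estimator as final_loss instead of being computed into a throwaway local. A minimal sketch with an explicit validation split (synthetic data, arbitrary regularisation value):

    import numpy as np
    from sklearn.model_selection import train_test_split
    from dirichletcal.calib.fulldirichlet import FullDirichletCalibrator

    probs = np.random.dirichlet([1, 1, 1], size=500)   # fake 3-class probabilities
    labels = np.random.randint(0, 3, size=500)
    p_fit, p_val, y_fit, y_val = train_test_split(probs, labels, test_size=0.2)

    cal = FullDirichletCalibrator(reg_lambda=1e-2)
    cal.fit(p_fit, y_fit, X_val=p_val, y_val=y_val)
    print(cal.final_loss)   # log loss of the calibrated validation predictions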

dirichletcal/calib/matrixscaling.py

Lines changed: 15 additions & 13 deletions
@@ -1,20 +1,17 @@
-import logging
 from sklearn.base import BaseEstimator, RegressorMixin
 
 import numpy as np
 from .multinomial import MultinomialRegression
 from ..utils import clip_for_log
 from sklearn.metrics import log_loss
 
-from .multinomial import _get_identity_weights
-
 
 class MatrixScaling(BaseEstimator, RegressorMixin):
-    def __init__(self, reg_lambda_list=[0.0], reg_mu_list=[None],
-                 logit_input=False, logit_constant=None,
+    def __init__(self, reg_lambda_list=[0.0], reg_mu_list=[None],
+                 logit_input=False, logit_constant=None,
                  weights_init=None, initializer='identity'):
         self.weights_init = weights_init
-        self.logit_input=logit_input
+        self.logit_input = logit_input
         self.logit_constant = logit_constant
         self.reg_lambda_list = reg_lambda_list
         self.reg_mu_list = reg_mu_list
@@ -36,7 +33,7 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
             X_val = X.copy()
             y_val = y.copy()
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _X = np.copy(X)
             _X = np.log(clip_for_log(_X))
             _X_val = np.copy(X_val)
@@ -51,14 +48,19 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
             _X = np.copy(X)
             _X_val = np.copy(X_val)
 
+        final_cal = np.nan
+        final_loss = np.nan
+        final_reg_lambda = np.nan
+        final_reg_mu = np.nan
+
         for i in range(0, len(self.reg_lambda_list)):
             for j in range(0, len(self.reg_mu_list)):
-                tmp_cal = MultinomialRegression(method='Full',
-                                                reg_lambda=self.reg_lambda_list[i],
-                                                reg_mu=self.reg_mu_list[j])
+                tmp_cal = MultinomialRegression(
+                    method='Full', reg_lambda=self.reg_lambda_list[i],
+                    reg_mu=self.reg_mu_list[j])
                 tmp_cal.fit(_X, y, *args, **kwargs)
                 tmp_loss = log_loss(y_val, tmp_cal.predict_proba(_X_val))
-
+
                 if (i + j) == 0:
                     final_cal = tmp_cal
                     final_loss = tmp_loss
@@ -88,7 +90,7 @@ def intercept_(self):
     def predict_proba(self, S):
         k = np.shape(S)[1]
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _S = np.log(clip_for_log(np.copy(S)))
             if self.logit_constant is None:
                 _S = _S - _S[:, -1].reshape(-1, 1).repeat(k, axis=1)
@@ -102,7 +104,7 @@ def predict_proba(self, S):
     def predict(self, S):
         k = np.shape(S)[1]
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _S = np.log(clip_for_log(np.copy(S)))
             if self.logit_constant is None:
                 _S = _S - _S[:, -1].reshape(-1, 1).repeat(k, axis=1)
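
MatrixScaling.fit iterates over reg_lambda_list x reg_mu_list and, judging by the visible code, keeps the MultinomialRegression with the lowest validation log loss; the new np.nan initialisers just make the selection variables well defined before the loop runs. An illustrative call with made-up values:

    import numpy as np
    from dirichletcal.calib.matrixscaling import MatrixScaling

    probs = np.random.dirichlet([1, 2, 3], size=400)   # synthetic probability outputs
    labels = np.random.randint(0, 3, size=400)

    ms = MatrixScaling(reg_lambda_list=[0.0, 1e-3, 1e-1], reg_mu_list=[None])
    ms.fit(probs, labels)                 # grid-searches the 3 x 1 combinations
    calibrated = ms.predict_proba(probs)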

dirichletcal/calib/multinomial.py

Lines changed: 27 additions & 26 deletions
@@ -32,7 +32,7 @@ def __init__(self, weights_0=None, method='Full', initializer='identity',
         implements a quasi Newton method
         """
         if method not in ['Full', 'Diag', 'FixDiag']:
-            raise(ValueError('method {} not avaliable'.format(method)))
+            raise(ValueError(f"method {method} not avaliable"))
 
         self.weights_0 = weights_0
         self.method = method
@@ -68,7 +68,6 @@ def predict(self, S):
 
         return np.asarray(self.predict_proba(S))
 
-
     def fit(self, X, y, *args, **kwargs):
 
         self.__setup()
@@ -99,8 +98,8 @@ def fit(self, X, y, *args, **kwargs):
 
         self.weights_0_ = self._get_initial_weights(self.initializer)
 
-        if (self.optimizer == 'newton'
-                or (self.optimizer == 'auto' and k <= 36)):
+        if (self.optimizer == 'newton' or
+                (self.optimizer == 'auto' and k <= 36)):
             weights = _newton_update(self.weights_0_, X_, XXT, target, k,
                                      self.method, reg_lambda=self.reg_lambda,
                                      reg_mu=self.reg_mu, ref_row=self.ref_row,
@@ -141,6 +140,8 @@ def _get_initial_weights(self, ref_row, initializer='identity'):
 
         k = len(self.classes)
 
+        weights_0 = self.weights_0_
+
         if self.weights_0_ is None:
             if initializer == 'identity':
                 weights_0 = _get_identity_weights(k, ref_row, self.method)
@@ -185,33 +186,33 @@ def _objective(params, *args):
 
 
 def _get_weights(params, k, ref_row, method):
-        ''' Reshapes the given params (weights) into the full matrix including 0
-        '''
+    '''Reshapes the given params (weights) into the full matrix including 0
+    '''
 
-        if method in ['Full', None]:
-            raw_weights = params.reshape(-1, k+1)
-            # weights = np.zeros([k, k+1])
-            # weights[:-1, :] = params.reshape(-1, k + 1)
+    if method in ['Full', None]:
+        raw_weights = params.reshape(-1, k+1)
+        # weights = np.zeros([k, k+1])
+        # weights[:-1, :] = params.reshape(-1, k + 1)
 
-        elif method == 'Diag':
-            raw_weights = np.hstack([np.diag(params[:k]),
-                                     params[k:].reshape(-1, 1)])
-            # weights[:, :-1][np.diag_indices(k)] = params[:]
+    elif method == 'Diag':
+        raw_weights = np.hstack([np.diag(params[:k]),
+                                 params[k:].reshape(-1, 1)])
+        # weights[:, :-1][np.diag_indices(k)] = params[:]
 
-        elif method == 'FixDiag':
-            raw_weights = np.hstack([np.eye(k) * params[0], np.zeros((k, 1))])
-            # weights[np.dgag_indices(k - 1)] = params[0]
-            # weights[np.diag_indices(k)] = params[0]
-        else:
-            raise(ValueError("Unknown calibration method {}".format(method)))
+    elif method == 'FixDiag':
+        raw_weights = np.hstack([np.eye(k) * params[0], np.zeros((k, 1))])
+        # weights[np.dgag_indices(k - 1)] = params[0]
+        # weights[np.diag_indices(k)] = params[0]
+    else:
+        raise(ValueError(f"Unknown calibration method {method}"))
 
-        if ref_row:
-            weights = raw_weights - np.repeat(
-                raw_weights[-1, :].reshape(1, -1), k, axis=0)
-        else:
-            weights = raw_weights
+    if ref_row:
+        weights = raw_weights - np.repeat(
+            raw_weights[-1, :].reshape(1, -1), k, axis=0)
+    else:
+        weights = raw_weights
 
-        return weights
+    return weights
 
 
 def _get_identity_weights(n_classes, ref_row, method):
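
To make the re-indented _get_weights easier to follow, here is a small worked example of the reshaping it performs for the 'Diag' method (k = 3 classes, arbitrary parameter values; _get_weights is a private helper, so this is for illustration only):

    import numpy as np
    from dirichletcal.calib.multinomial import _get_weights

    params = np.array([1.0, 2.0, 3.0,    # diagonal of the k x k block
                       0.1, 0.2, 0.3])   # intercept column
    W = _get_weights(params, k=3, ref_row=False, method='Diag')
    # W == [[1. 0. 0. 0.1]
    #       [0. 2. 0. 0.2]
    #       [0. 0. 3. 0.3]]

    W_ref = _get_weights(params, k=3, ref_row=True, method='Diag')
    # ref_row=True subtracts the last row from every row, so W_ref[-1] is all zeros.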

dirichletcal/calib/tempscaling.py

Lines changed: 14 additions & 14 deletions
@@ -1,17 +1,14 @@
-import logging
 from sklearn.base import BaseEstimator, RegressorMixin
 
 import numpy as np
 from .multinomial import MultinomialRegression
 from ..utils import clip_for_log
 from sklearn.metrics import log_loss
 
-from .multinomial import _get_identity_weights
-
 
 class TemperatureScaling(BaseEstimator, RegressorMixin):
-    def __init__(self, reg_lambda_list=[0.0], reg_mu_list=[None],
-                 logit_input=False, logit_constant=None,
+    def __init__(self, reg_lambda_list=[0.0], reg_mu_list=[None],
+                 logit_input=False, logit_constant=None,
                  weights_init=None, initializer='identity',
                  ref_row=True):
         self.weights_init = weights_init
@@ -28,7 +25,6 @@ def __setup(self):
         self.reg_mu = None
         self.weights_ = self.weights_init
 
-
     def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
 
         self.__setup()
@@ -39,7 +35,7 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
             X_val = X.copy()
             y_val = y.copy()
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _X = np.copy(X)
             _X = np.log(clip_for_log(_X))
             _X_val = np.copy(X_val)
@@ -54,15 +50,19 @@ def fit(self, X, y, X_val=None, y_val=None, *args, **kwargs):
             _X = np.copy(X)
             _X_val = np.copy(X_val)
 
+        final_cal = np.nan
+        final_loss = np.nan
+        final_reg_lambda = np.nan
+        final_reg_mu = np.nan
+
         for i in range(0, len(self.reg_lambda_list)):
             for j in range(0, len(self.reg_mu_list)):
-                tmp_cal = MultinomialRegression(method='FixDiag',
-                                                reg_lambda=self.reg_lambda_list[i],
-                                                reg_mu=self.reg_mu_list[j],
-                                                ref_row=self.ref_row)
+                tmp_cal = MultinomialRegression(
+                    method='FixDiag', reg_lambda=self.reg_lambda_list[i],
+                    reg_mu=self.reg_mu_list[j], ref_row=self.ref_row)
                 tmp_cal.fit(_X, y, *args, **kwargs)
                 tmp_loss = log_loss(y_val, tmp_cal.predict_proba(_X_val))
-
+
                 if (i + j) == 0:
                     final_cal = tmp_cal
                     final_loss = tmp_loss
@@ -92,7 +92,7 @@ def intercept_(self):
     def predict_proba(self, S):
         k = np.shape(S)[1]
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _S = np.log(clip_for_log(np.copy(S)))
             if self.logit_constant is None:
                 _S = _S - _S[:, -1].reshape(-1, 1).repeat(k, axis=1)
@@ -106,7 +106,7 @@ def predict_proba(self, S):
     def predict(self, S):
         k = np.shape(S)[1]
 
-        if self.logit_input == False:
+        if not self.logit_input:
             _S = np.log(clip_for_log(np.copy(S)))
             if self.logit_constant is None:
                 _S = _S - _S[:, -1].reshape(-1, 1).repeat(k, axis=1)
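
And a matching sketch for TemperatureScaling, which runs the same grid with method='FixDiag' (a single shared scaling parameter, per the _get_weights branch above). Synthetic data, default logit_input=False so probabilities are expected as input:

    import numpy as np
    from dirichletcal.calib.tempscaling import TemperatureScaling

    probs = np.random.dirichlet([1, 1, 1, 1], size=300)   # fake 4-class probabilities
    labels = np.random.randint(0, 4, size=300)

    ts = TemperatureScaling(reg_lambda_list=[0.0, 1e-2])
    ts.fit(probs, labels)
    calibrated = ts.predict_proba(probs)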
