"""Gradient Boosted Regression Trees.
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
import math
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
from time import time
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, issparse
from .._loss.loss import (
_LOSSES,
AbsoluteError,
ExponentialLoss,
HalfBinomialLoss,
HalfMultinomialLoss,
HalfSquaredError,
HuberLoss,
PinballLoss,
)
from ..base import ClassifierMixin, RegressorMixin, _fit_context, is_classifier
from ..dummy import DummyClassifier, DummyRegressor
from ..exceptions import NotFittedError
from ..model_selection import train_test_split
from ..preprocessing import LabelEncoder
from ..tree import DecisionTreeRegressor
from ..tree._tree import DOUBLE, DTYPE, TREE_LEAF
from ..utils import check_array, check_random_state, column_or_1d
from ..utils._param_validation import HasMethods, Interval, StrOptions
from ..utils.multiclass import check_classification_targets
from ..utils.stats import _weighted_percentile
from ..utils.validation import _check_sample_weight, check_is_fitted
from ._base import BaseEnsemble
from ._gradient_boosting import _random_sample_mask, predict_stage, predict_stages
_LOSSES = _LOSSES.copy()
_LOSSES.update(
{
"quantile": PinballLoss,
"huber": HuberLoss,
}
)
def _safe_divide(numerator, denominator):
"""Prevents overflow and division by zero."""
# This is used for classifiers where the denominator might become zero exactly.
# For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
# denominator = hessian = 0, and we should set the node value in the line search to
# zero as there is no improvement of the loss possible.
# For numerical safety, we do this already for extremely tiny values.
if abs(denominator) < 1e-150:
return 0.0
else:
# Cast to Python float to trigger Python errors, e.g. ZeroDivisionError,
# without relying on `np.errstate` that is not supported by Pyodide.
result = float(numerator) / float(denominator)
if math.isinf(result):
warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
return result
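# Example (illustrative sketch of the behaviour of ``_safe_divide``):
#
#     >>> _safe_divide(1.0, 1e-200)  # |denominator| < 1e-150 is treated as zero
#     0.0
#     >>> _safe_divide(1.0, 2.0)
#     0.5
#     >>> _safe_divide(1e300, 1e-100)  # overflows to inf and emits a RuntimeWarning
#     inf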
def _init_raw_predictions(X, estimator, loss, use_predict_proba):
"""Return the initial raw predictions.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data array.
estimator : object
The estimator to use to compute the predictions.
loss : BaseLoss
An instance of a loss function class.
use_predict_proba : bool
Whether estimator.predict_proba is used instead of estimator.predict.
Returns
-------
raw_predictions : ndarray of shape (n_samples, K)
The initial raw predictions. K is equal to 1 for binary
classification and regression, and equal to the number of classes
for multiclass classification. ``raw_predictions`` is cast
to float64.
"""
# TODO: Use loss.fit_intercept_only where appropriate instead of
# DummyRegressor which is the default given by the `init` parameter,
# see also _init_state.
if use_predict_proba:
# Our parameter validation, set via _fit_context and _parameter_constraints
# already guarantees that estimator has a predict_proba method.
predictions = estimator.predict_proba(X)
if not loss.is_multiclass:
predictions = predictions[:, 1] # probability of positive class
eps = np.finfo(np.float32).eps # FIXME: This is quite large!
predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)
else:
predictions = estimator.predict(X).astype(np.float64)
if predictions.ndim == 1:
return loss.link.link(predictions).reshape(-1, 1)
else:
return loss.link.link(predictions)
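# Example (illustrative sketch using the default ``init`` of the regressor, i.e.
# ``DummyRegressor(strategy="mean")``, and the squared error loss whose link is
# the identity):
#
#     >>> import numpy as np
#     >>> from sklearn.dummy import DummyRegressor
#     >>> from sklearn._loss.loss import HalfSquaredError
#     >>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
#     >>> y = np.array([1.0, 2.0, 3.0, 6.0])
#     >>> init = DummyRegressor(strategy="mean").fit(X, y)
#     >>> _init_raw_predictions(X, init, HalfSquaredError(), use_predict_proba=False)
#     # -> ndarray of shape (4, 1) filled with 3.0 (the mean of y)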
def _update_terminal_regions(
loss,
tree,
X,
y,
neg_gradient,
raw_prediction,
sample_weight,
sample_mask,
learning_rate=0.1,
k=0,
):
"""Update the leaf values to be predicted by the tree and raw_prediction.
The current raw predictions of the model (of this stage) are updated.
Additionally, the terminal regions (=leaves) of the given tree are updated as well.
This corresponds to the line search step in "Greedy Function Approximation" by
Friedman, Algorithm 1 step 5.
Update equals:
argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)
For non-trivial cases like the Binomial loss, the update has no closed formula and
is an approximation, again, see the Friedman paper.
Also note that the update formula for the SquaredError is the identity. Therefore,
in this case, the leaf values don't need an update and only the raw_predictions are
updated (with the learning rate included).
Parameters
----------
loss : BaseLoss
tree : tree.Tree
The tree object.
X : ndarray of shape (n_samples, n_features)
The data array.
y : ndarray of shape (n_samples,)
The target labels.
neg_gradient : ndarray of shape (n_samples,)
The negative gradient.
raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions (i.e. values from the tree leaves) of the
tree ensemble at iteration ``i - 1``.
sample_weight : ndarray of shape (n_samples,)
The weight of each sample.
sample_mask : ndarray of shape (n_samples,)
The sample mask to be used.
learning_rate : float, default=0.1
Learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default=0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
if not isinstance(loss, HalfSquaredError):
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
if isinstance(loss, HalfBinomialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
# Make a single Newton-Raphson step, see "Additive Logistic Regression:
# A Statistical View of Boosting" FHT00 and note that we use a slightly
# different version (factor 2) of "F" with proba=expit(raw_prediction).
# Our node estimate is given by:
# sum(w * (y - prob)) / sum(w * prob * (1 - prob))
# we take advantage that: y - prob = neg_gradient
neg_g = neg_gradient.take(indices, axis=0)
prob = y_ - neg_g
# numerator = negative gradient = y - prob
numerator = np.average(neg_g, weights=sw)
# denominator = hessian = prob * (1 - prob)
denominator = np.average(prob * (1 - prob), weights=sw)
return _safe_divide(numerator, denominator)
elif isinstance(loss, HalfMultinomialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
# we take advantage that: y - prob = neg_gradient
neg_g = neg_gradient.take(indices, axis=0)
prob = y_ - neg_g
K = loss.n_classes
# numerator = negative gradient * (k - 1) / k
# Note: The factor (k - 1)/k appears in the original papers "Greedy
# Function Approximation" by Friedman and "Additive Logistic
# Regression" by Friedman, Hastie, Tibshirani. This factor is, however,
# wrong or at least arbitrary as it directly multiplies the
# learning_rate. We keep it for backward compatibility.
numerator = np.average(neg_g, weights=sw)
numerator *= (K - 1) / K
# denominator = (diagonal) hessian = prob * (1 - prob)
denominator = np.average(prob * (1 - prob), weights=sw)
return _safe_divide(numerator, denominator)
elif isinstance(loss, ExponentialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
neg_g = neg_gradient.take(indices, axis=0)
# numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw)
numerator = np.average(neg_g, weights=sw)
# denominator = hessian = y * exp(-raw) + (1-y) * exp(raw)
# if y=0: hessian = exp(raw) = -neg_g
# y=1: hessian = exp(-raw) = neg_g
hessian = neg_g.copy()
hessian[y_ == 0] *= -1
denominator = np.average(hessian, weights=sw)
return _safe_divide(numerator, denominator)
else:
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
return loss.fit_intercept_only(
y_true=y_ - raw_prediction[indices, k],
sample_weight=sw,
)
# update each leaf (= perform line search)
for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]:
indices = np.nonzero(masked_terminal_regions == leaf)[
0
] # of terminal regions
y_ = y.take(indices, axis=0)
sw = None if sample_weight is None else sample_weight[indices]
update = compute_update(y_, indices, neg_gradient, raw_prediction, k)
# TODO: Multiply here by learning rate instead of everywhere else.
tree.value[leaf, 0, 0] = update
# update predictions (both in-bag and out-of-bag)
raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take(
terminal_regions, axis=0
)
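# Worked example of the Newton step used above for ``HalfBinomialLoss``
# (illustrative, with unit sample weights): for a leaf containing samples with
# y = [1, 1, 0] and predicted probabilities prob = [0.8, 0.6, 0.3],
#
#     numerator   = mean(y - prob)          = (0.2 + 0.4 - 0.3) / 3   = 0.1
#     denominator = mean(prob * (1 - prob)) = (0.16 + 0.24 + 0.21) / 3 ~ 0.2033
#     leaf value  = numerator / denominator                            ~ 0.49
#
# The raw predictions of all samples mapped to that leaf are then shifted by
# ``learning_rate * leaf value``.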
def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None):
"""Calculate and set self.closs.delta based on self.quantile."""
abserr = np.abs(y_true - raw_prediction.squeeze())
# sample_weight is always a ndarray, never None.
delta = _weighted_percentile(abserr, sample_weight, 100 * loss.quantile)
loss.closs.delta = float(delta)
class VerboseReporter:
"""Reports verbose output to stdout.
Parameters
----------
verbose : int
Verbosity level. If ``verbose==1``, output is printed once in a while
(when iteration mod verbose_mod is zero); if larger than 1, output
is printed for each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
"""Initialize reporter
Parameters
----------
est : Estimator
The estimator
begin_at_stage : int, default=0
stage at which to begin reporting
"""
# header fields and line format str
header_fields = ["Iter", "Train Loss"]
verbose_fmt = ["{iter:>10d}", "{train_score:>16.4f}"]
# do oob?
if est.subsample < 1:
header_fields.append("OOB Improve")
verbose_fmt.append("{oob_impr:>16.4f}")
header_fields.append("Remaining Time")
verbose_fmt.append("{remaining_time:>16s}")
# print the header line
print(("%10s " + "%16s " * (len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = " ".join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration.
Parameters
----------
j : int
The new iteration.
est : Estimator
The estimator.
"""
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = (
(est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)
)
if remaining_time > 60:
remaining_time = "{0:.2f}m".format(remaining_time / 60.0)
else:
remaining_time = "{0:.2f}s".format(remaining_time)
print(
self.verbose_fmt.format(
iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time,
)
)
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Abstract base class for Gradient Boosting."""
_parameter_constraints: dict = {
**DecisionTreeRegressor._parameter_constraints,
"learning_rate": [Interval(Real, 0.0, None, closed="left")],
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"criterion": [StrOptions({"friedman_mse", "squared_error"})],
"subsample": [Interval(Real, 0.0, 1.0, closed="right")],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")],
"n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None],
"tol": [Interval(Real, 0.0, None, closed="left")],
}
_parameter_constraints.pop("splitter")
_parameter_constraints.pop("monotonic_cst")
@abstractmethod
def __init__(
self,
*,
loss,
learning_rate,
n_estimators,
criterion,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_depth,
min_impurity_decrease,
init,
subsample,
max_features,
ccp_alpha,
random_state,
alpha=0.9,
verbose=0,
max_leaf_nodes=None,
warm_start=False,
validation_fraction=0.1,
n_iter_no_change=None,
tol=1e-4,
):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.ccp_alpha = ccp_alpha
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
@abstractmethod
def _encode_y(self, y=None, sample_weight=None):
"""Called by fit to validate and encode y."""
@abstractmethod
def _get_loss(self, sample_weight):
"""Get loss object from sklearn._loss.loss."""
def _fit_stage(
self,
i,
X,
y,
raw_predictions,
sample_weight,
sample_mask,
random_state,
X_csc=None,
X_csr=None,
):
"""Fit another stage of ``n_trees_per_iteration_`` trees."""
original_y = y
if isinstance(self._loss, HuberLoss):
set_huber_delta(
loss=self._loss,
y_true=y,
raw_prediction=raw_predictions,
sample_weight=sample_weight,
)
# TODO: Without oob, i.e. with self.subsample = 1.0, we could call
# self._loss.loss_gradient and use it to set train_score_.
# But note that train_score_[i] is the score AFTER fitting the i-th tree.
# Note: We need the negative gradient!
neg_gradient = -self._loss.gradient(
y_true=y,
raw_prediction=raw_predictions,
sample_weight=None, # We pass sample_weights to the tree directly.
)
# 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
# on neg_gradient to simplify the loop over n_trees_per_iteration_.
if neg_gradient.ndim == 1:
neg_g_view = neg_gradient.reshape((-1, 1))
else:
neg_g_view = neg_gradient
for k in range(self.n_trees_per_iteration_):
if self._loss.is_multiclass:
y = np.array(original_y == k, dtype=np.float64)
# induce regression tree on the negative gradient
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter="best",
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
ccp_alpha=self.ccp_alpha,
)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
X = X_csc if X_csc is not None else X
tree.fit(
X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False
)
# update tree leaves
X_for_tree_update = X_csr if X_csr is not None else X
_update_terminal_regions(
self._loss,
tree.tree_,
X_for_tree_update,
y,
neg_g_view[:, k],
raw_predictions,
sample_weight,
sample_mask,
learning_rate=self.learning_rate,
k=k,
)
# add tree to ensemble
self.estimators_[i, k] = tree
return raw_predictions
def _set_max_features(self):
"""Set self.max_features_."""
if isinstance(self.max_features, str):
if self.max_features == "auto":
if is_classifier(self):
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = self.n_features_in_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else: # self.max_features == "log2"
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, Integral):
max_features = self.max_features
else: # float
max_features = max(1, int(self.max_features * self.n_features_in_))
self.max_features_ = max_features
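# Example (illustrative sketch of the resolution above, for an estimator with
# ``n_features_in_ = 100``):
#
#     max_features="sqrt"  ->  max(1, int(sqrt(100)))   = 10
#     max_features="log2"  ->  max(1, int(log2(100)))   = 6
#     max_features=None    ->  100
#     max_features=20      ->  20
#     max_features=0.25    ->  max(1, int(0.25 * 100))  = 25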
def _init_state(self):
"""Initialize model state and allocate model state data structures."""
self.init_ = self.init
if self.init_ is None:
if is_classifier(self):
self.init_ = DummyClassifier(strategy="prior")
elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
elif isinstance(self._loss, PinballLoss):
self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
else:
self.init_ = DummyRegressor(strategy="mean")
self.estimators_ = np.empty(
(self.n_estimators, self.n_trees_per_iteration_), dtype=object
)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_score_ = np.nan
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
if hasattr(self, "estimators_"):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, "train_score_"):
del self.train_score_
if hasattr(self, "oob_improvement_"):
del self.oob_improvement_
if hasattr(self, "oob_scores_"):
del self.oob_scores_
if hasattr(self, "oob_score_"):
del self.oob_score_
if hasattr(self, "init_"):
del self.init_
if hasattr(self, "_rng"):
del self._rng
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes."""
# self.n_estimators is the total number of estimators, including the ones
# already fitted in previous calls to fit (warm start).
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError(
"resize with smaller n_estimators %d < %d"
% (total_n_estimators, self.estimators_.shape[0])
)
self.estimators_ = np.resize(
self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
)
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, "oob_improvement_"):
# if do oob resize arrays or create new if not available
if hasattr(self, "oob_improvement_"):
self.oob_improvement_ = np.resize(
self.oob_improvement_, total_n_estimators
)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros(
(total_n_estimators,), dtype=np.float64
)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
def _is_fitted(self):
return len(getattr(self, "estimators_", [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
check_is_fitted(self)
@_fit_context(
# GradientBoosting*.init is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
in regression).
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Fitted estimator.
"""
if not self.warm_start:
self._clear_state()
# Check input
# check_array converts both X and y to the same dtype, but the trees
# use different types for X and y, so we check them separately.
X, y = self._validate_data(
X, y, accept_sparse=["csr", "csc", "coo"], dtype=DTYPE, multi_output=True
)
sample_weight_is_none = sample_weight is None
sample_weight = _check_sample_weight(sample_weight, X)
if sample_weight_is_none:
y = self._encode_y(y=y, sample_weight=None)
else:
y = self._encode_y(y=y, sample_weight=sample_weight)
y = column_or_1d(y, warn=True) # TODO: Is this still required?
self._set_max_features()
# self.loss is guaranteed to be a string
self._loss = self._get_loss(sample_weight=sample_weight)
if self.n_iter_no_change is not None:
stratify = y if is_classifier(self) else None
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
random_state=self.random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
if is_classifier(self):
if self.n_classes_ != np.unique(y_train).shape[0]:
# We choose to error here. The problem is that the init
# estimator would be trained on y, which has some missing
# classes now, so its predictions would not have the
# correct shape.
raise ValueError(
"The training data after the early stopping split "
"is missing some classes. Try using another random "
"seed."
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
n_samples = X_train.shape[0]
# First time calling fit.
if not self._is_fitted():
# init state
self._init_state()
# fit initial model and initialize raw predictions
if self.init_ == "zero":
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=np.float64,
)
else:
# XXX clean this once we have a support_sample_weight tag
if sample_weight_is_none:
self.init_.fit(X_train, y_train)
else:
msg = (
"The initial estimator {} does not support sample "
"weights.".format(self.init_.__class__.__name__)
)
try:
self.init_.fit(
X_train, y_train, sample_weight=sample_weight_train
)
except TypeError as e:
if "unexpected keyword argument 'sample_weight'" in str(e):
# regular estimator without SW support
raise ValueError(msg) from e
else: # regular estimator whose input checking failed
raise
except ValueError as e:
if (
"pass parameters to specific steps of "
"your pipeline using the "
"stepname__parameter"
in str(e)
): # pipeline
raise ValueError(msg) from e
else: # regular estimator whose input checking failed
raise
raw_predictions = _init_raw_predictions(
X_train, self.init_, self._loss, is_classifier(self)
)
begin_at_stage = 0
# The rng state must be preserved if warm_start is True
self._rng = check_random_state(self.random_state)
# warm start: this is not the first time fit was called
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError(
"n_estimators=%d must be larger or equal to "
"estimators_.shape[0]=%d when "
"warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
)
begin_at_stage = self.estimators_.shape[0]
# The requirements of _raw_predict
# are more constrained than fit. It accepts only CSR
# matrices. Finite values have already been checked in _validate_data.
X_train = check_array(
X_train,
dtype=DTYPE,
order="C",
accept_sparse="csr",
force_all_finite=False,
)
raw_predictions = self._raw_predict(X_train)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(
X_train,
y_train,
raw_predictions,
sample_weight_train,
self._rng,
X_val,
y_val,
sample_weight_val,
begin_at_stage,
monitor,
)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, "oob_improvement_"):
# OOB scores were computed
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.oob_scores_ = self.oob_scores_[:n_stages]
self.oob_score_ = self.oob_scores_[-1]
self.n_estimators_ = n_stages
return self
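# Example (illustrative sketch of a ``monitor`` callback as accepted by ``fit``;
# the stopping rule below is arbitrary):
#
#     >>> def stop_when_train_loss_is_small(i, est, locals_dict):
#     ...     # ``train_score_[i]`` is already filled in for the current stage.
#     ...     return est.train_score_[i] < 0.1
#     >>> # model.fit(X, y, monitor=stop_when_train_loss_is_small)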
def _fit_stages(
self,
X,
y,
raw_predictions,
sample_weight,
random_state,
X_val,
y_val,
sample_weight_val,
begin_at_stage=0,
monitor=None,
):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples,), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
loss_history = np.full(self.n_iter_no_change, np.inf)
# We create a generator to get the predictions for X_val after
# the addition of each successive stage
y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
# Older versions of GBT had their own loss functions. With the new common
# private loss function submodule _loss, we are often a factor of 2 away
# from the old values. Here we keep backward compatibility for
# oob_scores_ and oob_improvement_, even though the old convention is
# inconsistent (sometimes the new loss is half the old one, sometimes
# not).
if isinstance(
self._loss,
(
HalfSquaredError,
HalfBinomialLoss,
),
):
factor = 2
else:
factor = 1
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
y_oob_masked = y[~sample_mask]
sample_weight_oob_masked = sample_weight[~sample_mask]
if i == 0: # store the initial loss to compute the OOB score
initial_loss = factor * self._loss(
y_true=y_oob_masked,
raw_prediction=raw_predictions[~sample_mask],
sample_weight=sample_weight_oob_masked,
)
# fit next stage of trees
raw_predictions = self._fit_stage(
i,
X,
y,
raw_predictions,
sample_weight,
sample_mask,
random_state,
X_csc=X_csc,
X_csr=X_csr,
)
# track loss
if do_oob:
self.train_score_[i] = factor * self._loss(
y_true=y[sample_mask],
raw_prediction=raw_predictions[sample_mask],
sample_weight=sample_weight[sample_mask],
)
self.oob_scores_[i] = factor * self._loss(
y_true=y_oob_masked,
raw_prediction=raw_predictions[~sample_mask],
sample_weight=sample_weight_oob_masked,
)
previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
self.oob_score_ = self.oob_scores_[-1]
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = factor * self._loss(
y_true=y,
raw_prediction=raw_predictions,
sample_weight=sample_weight,
)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
# We also provide an early stopping based on the score from
# validation set (X_val, y_val), if n_iter_no_change is set
if self.n_iter_no_change is not None:
# By calling next(y_val_pred_iter), we get the predictions
# for X_val after the addition of the current stage
validation_loss = factor * self._loss(
y_val, next(y_val_pred_iter), sample_weight_val
)
# Require validation_score to be better (less) than at least
# one of the last n_iter_no_change evaluations
if np.any(validation_loss + self.tol < loss_history):
loss_history[i % len(loss_history)] = validation_loss
else:
break
return i + 1
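# Example (illustrative sketch of the validation-based early stopping handled in
# ``_fit_stages``; dataset and hyperparameters are arbitrary):
#
#     >>> from sklearn.datasets import make_classification
#     >>> from sklearn.ensemble import GradientBoostingClassifier
#     >>> X, y = make_classification(n_samples=500, random_state=0)
#     >>> clf = GradientBoostingClassifier(
#     ...     n_estimators=500, n_iter_no_change=5, validation_fraction=0.1,
#     ...     tol=1e-4, random_state=0,
#     ... ).fit(X, y)
#     >>> clf.n_estimators_ <= 500  # may stop before all 500 stages are fitted
#     True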
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _raw_predict_init(self, X):
"""Check input and compute raw predictions of the init estimator."""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if self.init_ == "zero":
raw_predictions = np.zeros(
shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64
)
else:
raw_predictions = _init_raw_predictions(
X, self.init_, self._loss, is_classifier(self)
)
return raw_predictions
def _raw_predict(self, X):
"""Return the sum of the trees raw predictions (+ init estimator)."""
check_is_fitted(self)
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
return raw_predictions
def _staged_raw_predict(self, X, check_input=True):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
If False, the input arrays X will not be checked.
Returns
-------
raw_predictions : generator of ndarray of shape (n_samples, k)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
if check_input:
X = self._validate_data(
X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
)
raw_predictions = self._raw_predict_init(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions)
yield raw_predictions.copy()
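# Example (illustrative sketch; the public ``staged_predict`` methods build on
# this generator, one array per fitted stage):
#
#     >>> from sklearn.datasets import make_regression
#     >>> from sklearn.ensemble import GradientBoostingRegressor
#     >>> X, y = make_regression(n_samples=100, random_state=0)
#     >>> reg = GradientBoostingRegressor(n_estimators=3, random_state=0).fit(X, y)
#     >>> stages = list(reg._staged_raw_predict(X))
#     >>> len(stages), stages[0].shape
#     (3, (100, 1))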
@property
def feature_importances_(self):