API

Supervised-training

AutoML class

Bases: BaseAutoML

Automated Machine Learning for supervised tasks (binary classification, multiclass classification, regression).
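
A minimal end-to-end sketch (not part of the generated reference) showing a typical workflow. It assumes scikit-learn is installed for the example dataset; the time budget and dataset are arbitrary placeholders:

import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

from supervised import AutoML  # mljar-supervised

# Example data: a small binary classification dataset from scikit-learn.
data = load_breast_cancer()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = pd.Series(data.target)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)

# Default "Explain" mode with a short (5 minute) time budget.
automl = AutoML(total_time_limit=300)
automl.fit(X_train, y_train)

predictions = automl.predict(X_test)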

Source code in supervised\automl.py
class AutoML(BaseAutoML):
    """
    Automated Machine Learning for supervised tasks (binary classification, multiclass classification, regression).
    """

    def __init__(
            self,
            results_path: Optional[str] = None,
            total_time_limit: int = 60 * 60,
            mode: Literal["Explain", "Perform", "Compete", "Optuna"] = "Explain",
            ml_task: Literal[
                "auto", "binary_classification", "multiclass_classification", "regression"
            ] = "auto",
            model_time_limit: Optional[int] = None,
            algorithms: Union[
                Literal["auto"],
                List[
                    Literal[
                        "Baseline",
                        "Linear",
                        "Decicion Tree",
                        "Random Forest",
                        "Extra Trees",
                        "LightGBM",
                        "Xgboost",
                        "CatBoost",
                        "Neural Network",
                        "Nearest Neighbors",
                    ]
                ],
            ] = "auto",
            train_ensemble: bool = True,
            stack_models: Union[Literal["auto"], bool] = "auto",
            eval_metric: str = "auto",
            validation_strategy: Union[Literal["auto"], dict] = "auto",
            explain_level: Union[Literal["auto"], Literal[0, 1, 2]] = "auto",
            composite_features: Union[Literal["auto"], bool, int] = "auto",
            features_selection: Union[Literal["auto"], bool] = "auto",
            start_random_models: Union[Literal["auto"], int] = "auto",
            hill_climbing_steps: Union[Literal["auto"], int] = "auto",
            top_models_to_improve: Union[Literal["auto"], int] = "auto",
            boost_on_errors: Union[Literal["auto"], bool] = "auto",
            kmeans_features: Union[Literal["auto"], bool] = "auto",
            mix_encoding: Union[Literal["auto"], bool] = "auto",
            max_single_prediction_time: Optional[Union[int, float]] = None,
            optuna_time_budget: Optional[int] = None,
            optuna_init_params: dict = {},
            optuna_verbose: bool = True,
            fairness_metric: str = "auto",
            fairness_threshold: Union[Literal["auto"], float] = "auto",
            privileged_groups: Union[Literal["auto"], list] = "auto",
            underprivileged_groups: Union[Literal["auto"], list] = "auto",
            n_jobs: int = -1,
            verbose: int = 1,
            random_state: int = 1234,
            self_training=False,
    ):
        """
        Initialize `AutoML` object.

        Arguments:
            results_path (str): The path with results.
                If None, then the name of the directory will be generated with the template: AutoML_{number},
                where the number can be from 1 to 1,000, depending on which directory name is available.
                If `results_path` points to a directory with AutoML results (`params.json` must be present),
                then all models will be loaded.

            total_time_limit (int): The total time limit in seconds for AutoML training.
                It is not used when `model_time_limit` is not `None`.

            mode (str): Can be {`Explain`, `Perform`, `Compete`, `Optuna`}.
                This parameter defines the goal of AutoML and how intensive the AutoML search will be.

                - `Explain` : To be used when the user wants to explain and understand the data.
                    - Uses 75%/25% train/test split.
                    - Uses the following models: `Baseline`, `Linear`, `Decision Tree`, `Random Forest`, `XGBoost`, `Neural Network`, and `Ensemble`.
                    - Has full explanations in reports: learning curves, importance plots, and SHAP plots.
                - `Perform` : To be used when the user wants to train a model that will be used in real-life use cases.
                    - Uses 5-fold CV (Cross-Validation).
                    - Uses the following models: `Linear`, `Random Forest`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`, and `Ensemble`.
                    - Has learning curves and importance plots in reports.
                - `Compete` : To be used for machine learning competitions (maximum performance).
                    - Uses 80/20 train/test split, or 5-fold CV, or 10-fold CV (Cross-Validation) - it depends on `total_time_limit`. If not set directly, AutoML will select validation automatically.
                    - Uses the following models: `Decision Tree`, `Random Forest`, `Extra Trees`, `LightGBM`,  `XGBoost`, `CatBoost`, `Neural Network`,
                        `Nearest Neighbors`, `Ensemble`, and `Stacking`.
                    - It has only learning curves in the reports.
                - `Optuna` : To be used for creating highly-tuned machine learning models.
                    - Uses 10-fold CV (Cross-Validation).
                    - It tunes with Optuna the following algorithms: `Random Forest`, `Extra Trees`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`.
                    - It applies `Ensemble` and `Stacking` for trained models.
                    - It has only learning curves in the reports.

            ml_task (str): Can be {"auto", "binary_classification", "multiclass_classification", "regression"}.

                - If left `auto`, AutoML will try to guess the task based on target values.
                - If there are only 2 unique values in the target, the task is set to `"binary_classification"`.
                - If the number of unique values in the target is between 2 and 20 (inclusive), the task is set to `"multiclass_classification"`.
                - In all other cases, the task is set to `"regression"`.

            model_time_limit (int): The time limit for training a single model, in seconds.
                If `model_time_limit` is set, the `total_time_limit` is not respected.
                The single model can contain several learners. The time limit for subsequent learners is computed based on `model_time_limit`.

                For example, in the case of 10-fold cross-validation, one model will have 10 learners.
                The `model_time_limit` is the time for all 10 learners.

            algorithms (list of str): The list of algorithms that will be used in the training.
                The algorithms can be:

                - `Baseline`,
                - `Linear`,
                - `Decision Tree`,
                - `Random Forest`,
                - `Extra Trees`,
                - `LightGBM`,
                - `Xgboost`,
                - `CatBoost`,
                - `Neural Network`,
                - `Nearest Neighbors`,


            train_ensemble (boolean): Whether an ensemble gets created at the end of the training.

            stack_models (boolean): Whether a models stack gets created at the end of the training. Stack level is 1.

            eval_metric (str): The metric to be used in early stopping and to compare models.

                - for binary classification: `logloss`, `auc`, `f1`, `average_precision`, `accuracy` - default is logloss (if left "auto")
                - for multiclass classification: `logloss`, `f1`, `accuracy` - default is `logloss` (if left "auto")
                - for regression: `rmse`, `mse`, `mae`, `r2`, `mape`, `spearman`, `pearson` - default is `rmse` (if left "auto")

            validation_strategy (dict): Dictionary with validation type. Train/test split and cross-validation are supported.

            explain_level (int): The level of explanations included with each model:

                - if `explain_level` is `0` no explanations are produced.
                - if `explain_level` is `1` the following explanations are produced: an importance plot (using the permutation method), tree plots for decision trees, and coefficients for linear models.
                - if `explain_level` is `2` the following explanations are produced: the same as `1` plus SHAP explanations.

                If left `auto` AutoML will produce explanations based on the selected `mode`.

            composite_features (boolean or int): Whether to use golden features (and how many should be added).
                If left `auto` AutoML will use golden features based on the selected `mode`:

                - If `mode` is "Explain", `composite_features` = False.
                - If `mode` is "Perform", `composite_features` = True.
                - If `mode` is "Compete", `composite_features` = True.

                If a `boolean` value is set, then the number of Golden Features is determined automatically.
                It is set to min(100, max(10, 0.1*number_of_input_features)).

                If `int` value is set, the number of Golden Features is set to this value.

            features_selection (boolean): Whether to do feature selection.
                If left `auto` AutoML will do feature selection based on the selected `mode`:

                - If `mode` is "Explain", `features_selection` = False.
                - If `mode` is "Perform", `features_selection` = True.
                - If `mode` is "Compete", `features_selection` = True.

            start_random_models (int): Number of starting random models to try.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `start_random_models` = 1.
                - If `mode` is "Perform", `start_random_models` = 5.
                - If `mode` is "Compete", `start_random_models` = 10.

            hill_climbing_steps (int): Number of steps to perform during hill climbing.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `hill_climbing_steps` = 0.
                - If `mode` is "Perform", `hill_climbing_steps` = 2.
                - If `mode` is "Compete", `hill_climbing_steps` = 2.

            top_models_to_improve (int): Number of best models to improve in `hill_climbing` steps.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `top_models_to_improve` = 0.
                - If `mode` is "Perform", `top_models_to_improve` = 2.
                - If `mode` is "Compete", `top_models_to_improve` = 3.

            boost_on_errors (boolean): Whether a model with boost on errors from the previous best model should be trained. By default, it is available in the `Compete` mode.

            kmeans_features (boolean): Whether a model with k-means generated features should be trained. By default available in the `Compete` mode.

            mix_encoding (boolean): Whether a model with mixed encoding should be trained. Mixed encoding is the encoding that uses label encoding
                for categoricals with more than 25 categories, and one-hot binary encoding for other categoricals. It is only applied if there are
                categorical features with cardinality smaller than 25. By default it is available in the `Compete` mode.

            max_single_prediction_time (int or float): The limit on prediction time for a single sample. Use it if you want a model with fast predictions,
                which is ideal for ML pipelines served as a REST API. Time is in seconds. By default (`max_single_prediction_time=None`) models are not optimized for fast predictions,
                except in the `Perform` mode, where the default is `0.5` seconds.

            optuna_time_budget (int): The time in seconds that Optuna should use to tune each algorithm. It is the time for tuning a single algorithm.
                If you select two algorithms, Xgboost and CatBoost, and set optuna_time_budget=1000, then Xgboost will be tuned for 1000 seconds and CatBoost will be tuned for 1000 seconds.
                What is more, the tuning is done for each data type, for example for raw data and for data with inserted Golden Features.
                This parameter is only used when `mode="Optuna"`. If you set `mode="Optuna"` and do not set this parameter, it defaults to 3600 seconds.

            optuna_init_params (dict): If you already have tuned parameters from Optuna, you can reuse them by setting this parameter.
                This parameter is only used when `mode="Optuna"`. The dict should have the structure and params as specified in the AutoML documentation.

            optuna_verbose (boolean): If true the Optuna tuning details are displayed. Set to `True` by default.

            fairness_metric (string): Name of fairness metric that will be used for assessing fairness criteria.
                Available metrics for binary and multiclass classification:

                - `demographic_parity_difference`,
                - `demographic_parity_ratio` - default metric,
                - `equalized_odds_difference`,
                - `equalized_odds_ratio`.

                Metrics for regression:

                - `group_loss_difference`,
                - `group_loss_ratio` - default metric.


            fairness_threshold (float): The threshold value for the fairness metric.
                The optimization direction (below or above the threshold) of the fairness metric is determined automatically.

                Default values:

                - for `demographic_parity_difference` the metric value should be below 0.1,
                - for `demographic_parity_ratio` the metric value should be above 0.8,
                - for `equalized_odds_difference` the metric value should be below 0.1,
                - for `equalized_odds_ratio` the metric value should be above 0.8,
                - for `group_loss_ratio` the metric value should be above 0.8.

                For `group_loss_difference` the default threshold value can't be set because it depends on the dataset.
                If `group_loss_difference` metric is used and `fairness_threshold` is not specified manually, then an exception will be raised.

            privileged_groups (list): The list of privileged groups.

                By default, the list of privileged groups is automatically detected based on the fairness metric.
                For example, in a binary classification task, a privileged group is the one with the highest selection rate.

                Example value: `[{"sex": "Male"}]`

            underprivileged_groups (list): The list of underprivileged groups.

                By default, the list of underprivileged groups is automatically detected based on the fairness metric.
                For example, in a binary classification task, an underprivileged group is the one with the lowest selection rate.

                Example value: `[{"sex": "Female"}]`

            n_jobs (int): Number of CPU cores to be used. Default is set to `-1` which means using all processors.

            verbose (int): Controls the verbosity when fitting and predicting.

                Note:
                    Not implemented yet, please leave it set to `1`.

            random_state (int): Controls the randomness of the `AutoML`


        """
        super(AutoML, self).__init__()
        # Set user arguments
        self.mode = mode
        self.ml_task = ml_task
        self.results_path = results_path
        self.total_time_limit = total_time_limit
        self.model_time_limit = model_time_limit
        self.algorithms = algorithms
        self.train_ensemble = train_ensemble
        self.stack_models = stack_models
        self.eval_metric = eval_metric
        self.validation_strategy = validation_strategy
        self.verbose = verbose
        self.explain_level = explain_level
        self.composite_features = composite_features
        self.features_selection = features_selection
        self.start_random_models = start_random_models
        self.hill_climbing_steps = hill_climbing_steps
        self.top_models_to_improve = top_models_to_improve
        self.boost_on_errors = boost_on_errors
        self.kmeans_features = kmeans_features
        self.mix_encoding = mix_encoding
        self.max_single_prediction_time = max_single_prediction_time
        self.optuna_time_budget = optuna_time_budget
        self.optuna_init_params = optuna_init_params
        self.optuna_verbose = optuna_verbose
        self.fairness_metric = fairness_metric
        self.fairness_threshold = fairness_threshold
        self.privileged_groups = privileged_groups
        self.underprivileged_groups = underprivileged_groups
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.self_training = self_training

    def fit(
            self,
            X: Union[numpy.ndarray, pandas.DataFrame],
            y: Union[numpy.ndarray, pandas.Series],
            sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
            cv: Optional[Union[Iterable, List]] = None,
            sensitive_features: Optional[
                Union[numpy.ndarray, pandas.Series, pandas.DataFrame]
            ] = None,
    ):
        """Fit the AutoML model.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame): Training data

            y (numpy.ndarray or pandas.Series): Training targets

            sample_weight (numpy.ndarray or pandas.Series): Training sample weights

            cv (iterable or list): List or iterable with (train, validation) splits representing array of indices.
                It is used only with custom validation (`validation_strategy={'validation_type': 'custom'}`).

            sensitive_features (numpy.ndarray or pandas.Series or pandas.DataFrame):
                Sensitive features to learn fair models

        Returns:
            AutoML object: Returns `self`
        """
        return self._fit(X, y, sample_weight, cv, sensitive_features)

    def predict(self, X: Union[List, numpy.ndarray, pandas.DataFrame]) -> numpy.ndarray:
        """
        Computes predictions from AutoML best model.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            numpy.ndarray:

            - One-dimensional array of class labels for classification.
            - One-dimensional array of predictions for regression.
        """
        return self._predict(X)

    def predict_proba(
            self, X: Union[List, numpy.ndarray, pandas.DataFrame]
    ) -> numpy.ndarray:
        """
        Computes class probabilities from AutoML best model.
        This method can only be used for classification tasks.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            numpy.ndarray of shape (n_samples, n_classes):
                Matrix containing class probabilities of the input samples

        Raises:
            AutoMLException: Model has not yet been fitted.

        """
        return self._predict_proba(X)

    def predict_all(
            self, X: Union[List, numpy.ndarray, pandas.DataFrame]
    ) -> pandas.DataFrame:
        """
        Computes both class probabilities and class labels for classification tasks.
        Computes predictions for regression tasks.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            pandas.DataFrame:
                DataFrame of shape (n_samples, n_classes + 1) containing both class probabilities and class
                labels of the input samples for classification tasks.
                DataFrame with predictions for regression tasks.

        Raises:
            AutoMLException: Model has not yet been fitted.

        """
        return self._predict_all(X)

    def score(
            self,
            X: Union[numpy.ndarray, pandas.DataFrame],
            y: Optional[Union[numpy.ndarray, pandas.Series]] = None,
            sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
    ) -> float:
        """Calculates a goodness of `fit` for an AutoML instance.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame):
                Test values to make predictions on.

            y (numpy.ndarray or pandas.Series):
                True labels for X.

            sample_weight (numpy.ndarray or pandas.Series):
                Sample weights.
        Returns:
            float: Returns a goodness of fit measure (higher is better):

            - For classification tasks: returns the mean accuracy on the given test data and labels.
            - For regression tasks: returns the R^2 (coefficient of determination) on the given test data and labels.
        """
        return self._score(X, y, sample_weight)

    def report(self, width=900, height=1200):
        return self._report(width, height)

    def need_retrain(
            self,
            X: Union[numpy.ndarray, pandas.DataFrame],
            y: Union[numpy.ndarray, pandas.Series],
            sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
            decrease: float = 0.1,
    ) -> bool:
        """Decides about model retraining based on new data.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame):
                New data.

            y (numpy.ndarray or pandas.Series):
                True labels for X.

            sample_weight (numpy.ndarray or pandas.Series):
                Sample weights.

            decrease (float): The ratio of change in the performance used as a threshold for retraining decision.
                By default, it is set to `0.1` which means that if the performance of AutoML will decrease by 10%
                on new data then there is a need to retrain.

        Returns:
            boolean: Decides if there is a need to retrain the AutoML.
        """
        return self._need_retrain(X, y, sample_weight, decrease)

__init__(results_path=None, total_time_limit=60 * 60, mode='Explain', ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True, stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto', composite_features='auto', features_selection='auto', start_random_models='auto', hill_climbing_steps='auto', top_models_to_improve='auto', boost_on_errors='auto', kmeans_features='auto', mix_encoding='auto', max_single_prediction_time=None, optuna_time_budget=None, optuna_init_params={}, optuna_verbose=True, fairness_metric='auto', fairness_threshold='auto', privileged_groups='auto', underprivileged_groups='auto', n_jobs=-1, verbose=1, random_state=1234, self_training=False)

Initialize AutoML object.

Parameters:

Name Type Description Default
results_path str

The path with results. If None, then the name of the directory will be generated with the template: AutoML_{number}, where the number can be from 1 to 1,000, depending on which directory name is available. If results_path points to a directory with AutoML results (params.json must be present), then all models will be loaded.

None
total_time_limit int

The total time limit in seconds for AutoML training. It is not used when model_time_limit is not None.

60 * 60
mode str

Can be {Explain, Perform, Compete, Optuna}. This parameter defines the goal of AutoML and how intensive the AutoML search will be.

  • Explain : To be used when the user wants to explain and understand the data.
    • Uses 75%/25% train/test split.
    • Uses the following models: Baseline, Linear, Decision Tree, Random Forest, XGBoost, Neural Network, and Ensemble.
    • Has full explanations in reports: learning curves, importance plots, and SHAP plots.
  • Perform : To be used when the user wants to train a model that will be used in real-life use cases.
    • Uses 5-fold CV (Cross-Validation).
    • Uses the following models: Linear, Random Forest, LightGBM, XGBoost, CatBoost, Neural Network, and Ensemble.
    • Has learning curves and importance plots in reports.
  • Compete : To be used for machine learning competitions (maximum performance).
    • Uses 80/20 train/test split, or 5-fold CV, or 10-fold CV (Cross-Validation) - it depends on total_time_limit. If not set directly, AutoML will select validation automatically.
    • Uses the following models: Decision Tree, Random Forest, Extra Trees, LightGBM, XGBoost, CatBoost, Neural Network, Nearest Neighbors, Ensemble, and Stacking.
    • It has only learning curves in the reports.
  • Optuna : To be used for creating highly-tuned machine learning models.
    • Uses 10-fold CV (Cross-Validation).
    • It tunes with Optuna the following algorithms: Random Forest, Extra Trees, LightGBM, XGBoost, CatBoost, Neural Network.
    • It applies Ensemble and Stacking for trained models.
    • It has only learning curves in the reports.
'Explain'
ml_task str

Can be {"auto", "binary_classification", "multiclass_classification", "regression"}.

  • If left auto, AutoML will try to guess the task based on target values.
  • If there are only 2 unique values in the target, the task is set to "binary_classification".
  • If the number of unique values in the target is between 2 and 20 (inclusive), the task is set to "multiclass_classification".
  • In all other cases, the task is set to "regression".
'auto'
model_time_limit int

The time limit for training a single model, in seconds. If model_time_limit is set, the total_time_limit is not respected. The single model can contain several learners. The time limit for subsequent learners is computed based on model_time_limit.

For example, in the case of 10-fold cross-validation, one model will have 10 learners. The model_time_limit is the time for all 10 learners.

None
algorithms list of str

The list of algorithms that will be used in the training. The algorithms can be:

  • Baseline,
  • Linear,
  • Decision Tree,
  • Random Forest,
  • Extra Trees,
  • LightGBM,
  • Xgboost,
  • CatBoost,
  • Neural Network,
  • Nearest Neighbors,
'auto'
train_ensemble boolean

Whether an ensemble gets created at the end of the training.

True
stack_models boolean

Whether a models stack gets created at the end of the training. Stack level is 1.

'auto'
eval_metric str

The metric to be used in early stopping and to compare models.

  • for binary classification: logloss, auc, f1, average_precision, accuracy - default is logloss (if left "auto")
  • for multiclass classification: logloss, f1, accuracy - default is logloss (if left "auto")
  • for regression: rmse, mse, mae, r2, mape, spearman, pearson - default is rmse (if left "auto")
'auto'
validation_strategy dict

Dictionary with validation type. Train/test split and cross-validation are supported.

'auto'
explain_level int

The level of explanations included with each model:

  • if explain_level is 0 no explanations are produced.
  • if explain_level is 1 the following explanations are produced: an importance plot (using the permutation method), tree plots for decision trees, and coefficients for linear models.
  • if explain_level is 2 the following explanations are produced: the same as 1 plus SHAP explanations.

If left auto AutoML will produce explanations based on the selected mode.

'auto'
composite_features boolean or int

Whether to use golden features (and how many should be added). If left auto AutoML will use golden features based on the selected mode:

  • If mode is "Explain", composite_features = False.
  • If mode is "Perform", composite_features = True.
  • If mode is "Compete", composite_features = True.

If a boolean value is set, then the number of Golden Features is determined automatically. It is set to min(100, max(10, 0.1*number_of_input_features)).

If int value is set, the number of Golden Features is set to this value.

'auto'
features_selection boolean

Whether to do feature selection. If left auto AutoML will do feature selection based on the selected mode:

  • If mode is "Explain", features_selection = False.
  • If mode is "Perform", features_selection = True.
  • If mode is "Compete", features_selection = True.
'auto'
start_random_models int

Number of starting random models to try. If left auto AutoML will select it based on the selected mode:

  • If mode is "Explain", start_random_models = 1.
  • If mode is "Perform", start_random_models = 5.
  • If mode is "Compete", start_random_models = 10.
'auto'
hill_climbing_steps int

Number of steps to perform during hill climbing. If left auto AutoML will select it based on the selected mode:

  • If mode is "Explain", hill_climbing_steps = 0.
  • If mode is "Perform", hill_climbing_steps = 2.
  • If mode is "Compete", hill_climbing_steps = 2.
'auto'
top_models_to_improve int

Number of best models to improve in hill_climbing steps. If left auto AutoML will select it based on the selected mode:

  • If mode is "Explain", top_models_to_improve = 0.
  • If mode is "Perform", top_models_to_improve = 2.
  • If mode is "Compete", top_models_to_improve = 3.
'auto'
boost_on_errors boolean

Whether a model with boost on errors from the previous best model should be trained. By default, it is available in the Compete mode.

'auto'
kmeans_features boolean

Whether a model with k-means generated features should be trained. By default available in the Compete mode.

'auto'
mix_encoding boolean

Whether a model with mixed encoding should be trained. Mixed encoding is the encoding that uses label encoding for categoricals with more than 25 categories, and one-hot binary encoding for other categoricals. It is only applied if there are categorical features with cardinality smaller than 25. By default it is available in the Compete mode.

'auto'
max_single_prediction_time int or float

The limit on prediction time for a single sample. Use it if you want a model with fast predictions, which is ideal for ML pipelines served as a REST API. Time is in seconds. By default (max_single_prediction_time=None) models are not optimized for fast predictions, except in the Perform mode, where the default is 0.5 seconds.

None
optuna_time_budget int

The time in seconds that Optuna should use to tune each algorithm. It is the time for tuning a single algorithm. If you select two algorithms, Xgboost and CatBoost, and set optuna_time_budget=1000, then Xgboost will be tuned for 1000 seconds and CatBoost will be tuned for 1000 seconds. What is more, the tuning is done for each data type, for example for raw data and for data with inserted Golden Features. This parameter is only used when mode="Optuna". If you set mode="Optuna" and do not set this parameter, it defaults to 3600 seconds.

None
optuna_init_params dict

If you already have tuned parameters from Optuna, you can reuse them by setting this parameter. This parameter is only used when mode="Optuna". The dict should have the structure and params as specified in the AutoML documentation.

{}
optuna_verbose boolean

If true the Optuna tuning details are displayed. Set to True by default.

True
fairness_metric string

Name of fairness metric that will be used for assessing fairness criteria. Available metrics for binary and multiclass classification:

  • demographic_parity_difference,
  • demographic_parity_ratio - default metric,
  • equalized_odds_difference,
  • equalized_odds_ratio.

Metrics for regression:

  • group_loss_difference,
  • group_loss_ratio - default metric.
'auto'
fairness_threshold float

The threshold value for the fairness metric. The optimization direction (below or above the threshold) of the fairness metric is determined automatically.

Default values:

  • for demographic_parity_difference the metric value should be below 0.1,
  • for demographic_parity_ratio the metric value should be above 0.8,
  • for equalized_odds_difference the metric value should be below 0.1,
  • for equalized_odds_ratio the metric value should be above 0.8,
  • for group_loss_ratio the metric value should be above 0.8.

For group_loss_difference the default threshold value can't be set because it depends on the dataset. If group_loss_difference metric is used and fairness_threshold is not specified manually, then an exception will be raised.

'auto'
privileged_groups list

The list of privileged groups.

By default, the list of privileged groups is automatically detected based on the fairness metric. For example, in a binary classification task, a privileged group is the one with the highest selection rate.

Example value: [{"sex": "Male"}]

'auto'
underprivileged_groups list

The list of underprivileged groups.

By default, the list of underprivileged groups is automatically detected based on the fairness metric. For example, in a binary classification task, an underprivileged group is the one with the lowest selection rate.

Example value: [{"sex": "Female"}]

'auto'
n_jobs int

Number of CPU cores to be used. Default is set to -1 which means using all processors.

-1
verbose int

Controls the verbosity when fitting and predicting.

Note: Not implemented yet, please leave it set to 1

1
random_state int

Controls the randomness of the AutoML

1234
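
Below is a hedged configuration sketch illustrating several of the arguments documented above; the chosen algorithms, metric, and time budget are placeholders rather than recommendations:

from supervised import AutoML

automl = AutoML(
    mode="Compete",                   # maximum-performance preset
    ml_task="binary_classification",  # skip task auto-detection
    algorithms=["LightGBM", "Xgboost", "CatBoost"],
    total_time_limit=3600,            # overall budget: one hour
    eval_metric="auc",
    train_ensemble=True,
    explain_level=1,                  # importance plots, no SHAP
    n_jobs=-1,
    random_state=1234,
)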
Source code in supervised\automl.py
def __init__(
        self,
        results_path: Optional[str] = None,
        total_time_limit: int = 60 * 60,
        mode: Literal["Explain", "Perform", "Compete", "Optuna"] = "Explain",
        ml_task: Literal[
            "auto", "binary_classification", "multiclass_classification", "regression"
        ] = "auto",
        model_time_limit: Optional[int] = None,
        algorithms: Union[
            Literal["auto"],
            List[
                Literal[
                    "Baseline",
                    "Linear",
                    "Decicion Tree",
                    "Random Forest",
                    "Extra Trees",
                    "LightGBM",
                    "Xgboost",
                    "CatBoost",
                    "Neural Network",
                    "Nearest Neighbors",
                ]
            ],
        ] = "auto",
        train_ensemble: bool = True,
        stack_models: Union[Literal["auto"], bool] = "auto",
        eval_metric: str = "auto",
        validation_strategy: Union[Literal["auto"], dict] = "auto",
        explain_level: Union[Literal["auto"], Literal[0, 1, 2]] = "auto",
        composite_features: Union[Literal["auto"], bool, int] = "auto",
        features_selection: Union[Literal["auto"], bool] = "auto",
        start_random_models: Union[Literal["auto"], int] = "auto",
        hill_climbing_steps: Union[Literal["auto"], int] = "auto",
        top_models_to_improve: Union[Literal["auto"], int] = "auto",
        boost_on_errors: Union[Literal["auto"], bool] = "auto",
        kmeans_features: Union[Literal["auto"], bool] = "auto",
        mix_encoding: Union[Literal["auto"], bool] = "auto",
        max_single_prediction_time: Optional[Union[int, float]] = None,
        optuna_time_budget: Optional[int] = None,
        optuna_init_params: dict = {},
        optuna_verbose: bool = True,
        fairness_metric: str = "auto",
        fairness_threshold: Union[Literal["auto"], float] = "auto",
        privileged_groups: Union[Literal["auto"], list] = "auto",
        underprivileged_groups: Union[Literal["auto"], list] = "auto",
        n_jobs: int = -1,
        verbose: int = 1,
        random_state: int = 1234,
        self_training=False,
):
    """
    Initialize `AutoML` object.

    Arguments:
        results_path (str): The path with results.
            If None, then the name of the directory will be generated with the template: AutoML_{number},
            where the number can be from 1 to 1,000, depending on which directory name is available.
            If `results_path` points to a directory with AutoML results (`params.json` must be present),
            then all models will be loaded.

        total_time_limit (int): The total time limit in seconds for AutoML training.
            It is not used when `model_time_limit` is not `None`.

        mode (str): Can be {`Explain`, `Perform`, `Compete`, `Optuna`}.
            This parameter defines the goal of AutoML and how intensive the AutoML search will be.

            - `Explain` : To be used when the user wants to explain and understand the data.
                - Uses 75%/25% train/test split.
                - Uses the following models: `Baseline`, `Linear`, `Decision Tree`, `Random Forest`, `XGBoost`, `Neural Network`, and `Ensemble`.
                - Has full explanations in reports: learning curves, importance plots, and SHAP plots.
            - `Perform` : To be used when the user wants to train a model that will be used in real-life use cases.
                - Uses 5-fold CV (Cross-Validation).
                - Uses the following models: `Linear`, `Random Forest`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`, and `Ensemble`.
                - Has learning curves and importance plots in reports.
            - `Compete` : To be used for machine learning competitions (maximum performance).
                - Uses 80/20 train/test split, or 5-fold CV, or 10-fold CV (Cross-Validation) - it depends on `total_time_limit`. If not set directly, AutoML will select validation automatically.
                - Uses the following models: `Decision Tree`, `Random Forest`, `Extra Trees`, `LightGBM`,  `XGBoost`, `CatBoost`, `Neural Network`,
                    `Nearest Neighbors`, `Ensemble`, and `Stacking`.
                - It has only learning curves in the reports.
            - `Optuna` : To be used for creating highly-tuned machine learning models.
                - Uses 10-fold CV (Cross-Validation).
                - It tunes with Optuna the following algorithms: `Random Forest`, `Extra Trees`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`.
                - It applies `Ensemble` and `Stacking` for trained models.
                - It has only learning curves in the reports.

        ml_task (str): Can be {"auto", "binary_classification", "multiclass_classification", "regression"}.

            - If left `auto`, AutoML will try to guess the task based on target values.
            - If there are only 2 unique values in the target, the task is set to `"binary_classification"`.
            - If the number of unique values in the target is between 2 and 20 (inclusive), the task is set to `"multiclass_classification"`.
            - In all other cases, the task is set to `"regression"`.

        model_time_limit (int): The time limit for training a single model, in seconds.
            If `model_time_limit` is set, the `total_time_limit` is not respected.
            The single model can contain several learners. The time limit for subsequent learners is computed based on `model_time_limit`.

            For example, in the case of 10-fold cross-validation, one model will have 10 learners.
            The `model_time_limit` is the time for all 10 learners.

        algorithms (list of str): The list of algorithms that will be used in the training.
            The algorithms can be:

            - `Baseline`,
            - `Linear`,
            - `Decision Tree`,
            - `Random Forest`,
            - `Extra Trees`,
            - `LightGBM`,
            - `Xgboost`,
            - `CatBoost`,
            - `Neural Network`,
            - `Nearest Neighbors`,


        train_ensemble (boolean): Whether an ensemble gets created at the end of the training.

        stack_models (boolean): Whether a models stack gets created at the end of the training. Stack level is 1.

        eval_metric (str): The metric to be used in early stopping and to compare models.

            - for binary classification: `logloss`, `auc`, `f1`, `average_precision`, `accuracy` - default is logloss (if left "auto")
            - for multiclass classification: `logloss`, `f1`, `accuracy` - default is `logloss` (if left "auto")
            - for regression: `rmse`, `mse`, `mae`, `r2`, `mape`, `spearman`, `pearson` - default is `rmse` (if left "auto")

        validation_strategy (dict): Dictionary with validation type. Train/test split and cross-validation are supported.

        explain_level (int): The level of explanations included with each model:

            - if `explain_level` is `0` no explanations are produced.
            - if `explain_level` is `1` the following explanations are produced: an importance plot (using the permutation method), tree plots for decision trees, and coefficients for linear models.
            - if `explain_level` is `2` the following explanations are produced: the same as `1` plus SHAP explanations.

            If left `auto` AutoML will produce explanations based on the selected `mode`.

        composite_features (boolean or int): Whether to use golden features (and how many should be added).
            If left `auto` AutoML will use golden features based on the selected `mode`:

            - If `mode` is "Explain", `composite_features` = False.
            - If `mode` is "Perform", `composite_features` = True.
            - If `mode` is "Compete", `composite_features` = True.

            If a `boolean` value is set, then the number of Golden Features is determined automatically.
            It is set to min(100, max(10, 0.1*number_of_input_features)).

            If `int` value is set, the number of Golden Features is set to this value.

        features_selection (boolean): Whether to do feature selection.
            If left `auto` AutoML will do feature selection based on the selected `mode`:

            - If `mode` is "Explain", `features_selection` = False.
            - If `mode` is "Perform", `features_selection` = True.
            - If `mode` is "Compete", `features_selection` = True.

        start_random_models (int): Number of starting random models to try.
            If left `auto` AutoML will select it based on the selected `mode`:

            - If `mode` is "Explain", `start_random_models` = 1.
            - If `mode` is "Perform", `start_random_models` = 5.
            - If `mode` is "Compete", `start_random_models` = 10.

        hill_climbing_steps (int): Number of steps to perform during hill climbing.
            If left `auto` AutoML will select it based on the selected `mode`:

            - If `mode` is "Explain", `hill_climbing_steps` = 0.
            - If `mode` is "Perform", `hill_climbing_steps` = 2.
            - If `mode` is "Compete", `hill_climbing_steps` = 2.

        top_models_to_improve (int): Number of best models to improve in `hill_climbing` steps.
            If left `auto` AutoML will select it based on the selected `mode`:

            - If `mode` is "Explain", `top_models_to_improve` = 0.
            - If `mode` is "Perform", `top_models_to_improve` = 2.
            - If `mode` is "Compete", `top_models_to_improve` = 3.

        boost_on_errors (boolean): Whether a model with boost on errors from the previous best model should be trained. By default, it is available in the `Compete` mode.

        kmeans_features (boolean): Whether a model with k-means generated features should be trained. By default available in the `Compete` mode.

        mix_encoding (boolean): Whether a model with mixed encoding should be trained. Mixed encoding is the encoding that uses label encoding
            for categoricals with more than 25 categories, and one-hot binary encoding for other categoricals. It is only applied if there are
            categorical features with cardinality smaller than 25. By default it is available in the `Compete` mode.

        max_single_prediction_time (int or float): The limit on prediction time for a single sample. Use it if you want a model with fast predictions,
            which is ideal for ML pipelines served as a REST API. Time is in seconds. By default (`max_single_prediction_time=None`) models are not optimized for fast predictions,
            except in the `Perform` mode, where the default is `0.5` seconds.

        optuna_time_budget (int): The time in seconds that Optuna should use to tune each algorithm. It is the time for tuning a single algorithm.
            If you select two algorithms, Xgboost and CatBoost, and set optuna_time_budget=1000, then Xgboost will be tuned for 1000 seconds and CatBoost will be tuned for 1000 seconds.
            What is more, the tuning is done for each data type, for example for raw data and for data with inserted Golden Features.
            This parameter is only used when `mode="Optuna"`. If you set `mode="Optuna"` and do not set this parameter, it defaults to 3600 seconds.

        optuna_init_params (dict): If you already have tuned parameters from Optuna, you can reuse them by setting this parameter.
            This parameter is only used when `mode="Optuna"`. The dict should have the structure and params as specified in the AutoML documentation.

        optuna_verbose (boolean): If true the Optuna tuning details are displayed. Set to `True` by default.

        fairness_metric (string): Name of fairness metric that will be used for assessing fairness criteria.
            Available metrics for binary and multiclass classification:

            - `demographic_parity_difference`,
            - `demographic_parity_ratio` - default metric,
            - `equalized_odds_difference`,
            - `equalized_odds_ratio`.

            Metrics for regression:

            - `group_loss_difference`,
            - `group_loss_ratio` - default metric.


        fairness_threshold (float): The threshold value for the fairness metric.
            The optimization direction (below or above the threshold) of the fairness metric is determined automatically.

            Default values:

            - for `demographic_parity_difference` the metric value should be below 0.1,
            - for `demographic_parity_ratio` the metric value should be above 0.8,
            - for `equalized_odds_difference` the metric value should be below 0.1,
            - for `equalized_odds_ratio` the metric value should be above 0.8,
            - for `group_loss_ratio` the metric value should be above 0.8.

            For `group_loss_difference` the default threshold value can't be set because it depends on the dataset.
            If `group_loss_difference` metric is used and `fairness_threshold` is not specified manually, then an exception will be raised.

        privileged_groups (list): The list of privileged groups.

            By default, the list of privileged groups is automatically detected based on the fairness metric.
            For example, in a binary classification task, a privileged group is the one with the highest selection rate.

            Example value: `[{"sex": "Male"}]`

        underprivileged_groups (list): The list of underprivileged groups.

            By default, the list of underprivileged groups is automatically detected based on the fairness metric.
            For example, in a binary classification task, an underprivileged group is the one with the lowest selection rate.

            Example value: `[{"sex": "Female"}]`

        n_jobs (int): Number of CPU cores to be used. Default is set to `-1` which means using all processors.

        verbose (int): Controls the verbosity when fitting and predicting.

            Note:
                Not implemented yet, please leave it set to `1`.

        random_state (int): Controls the randomness of the `AutoML`


    """
    super(AutoML, self).__init__()
    # Set user arguments
    self.mode = mode
    self.ml_task = ml_task
    self.results_path = results_path
    self.total_time_limit = total_time_limit
    self.model_time_limit = model_time_limit
    self.algorithms = algorithms
    self.train_ensemble = train_ensemble
    self.stack_models = stack_models
    self.eval_metric = eval_metric
    self.validation_strategy = validation_strategy
    self.verbose = verbose
    self.explain_level = explain_level
    self.composite_features = composite_features
    self.features_selection = features_selection
    self.start_random_models = start_random_models
    self.hill_climbing_steps = hill_climbing_steps
    self.top_models_to_improve = top_models_to_improve
    self.boost_on_errors = boost_on_errors
    self.kmeans_features = kmeans_features
    self.mix_encoding = mix_encoding
    self.max_single_prediction_time = max_single_prediction_time
    self.optuna_time_budget = optuna_time_budget
    self.optuna_init_params = optuna_init_params
    self.optuna_verbose = optuna_verbose
    self.fairness_metric = fairness_metric
    self.fairness_threshold = fairness_threshold
    self.privileged_groups = privileged_groups
    self.underprivileged_groups = underprivileged_groups
    self.n_jobs = n_jobs
    self.random_state = random_state
    self.self_training = self_training

fit(X, y, sample_weight=None, cv=None, sensitive_features=None)

Fit the AutoML model.

Parameters:

Name Type Description Default
X numpy.ndarray or pandas.DataFrame

Training data

required
y numpy.ndarray or pandas.Series

Training targets

required
sample_weight numpy.ndarray or pandas.Series

Training sample weights

None
cv iterable or list

List or iterable with (train, validation) splits representing array of indices. It is used only with custom validation (validation_strategy={'validation_type': 'custom'}).

None
sensitive_features numpy.ndarray or pandas.Series or pandas.DataFrame

Sensitive features to learn fair models

None

Returns:

Type Description

AutoML object: Returns self
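
A short usage sketch of custom validation with fit, assuming X_train and y_train already exist and that scikit-learn's KFold is available to build the index splits:

from sklearn.model_selection import KFold

from supervised import AutoML

# Build (train, validation) index splits; any iterable of index pairs works.
splits = list(KFold(n_splits=5, shuffle=True, random_state=1234).split(X_train))

# Custom validation must be requested explicitly via validation_strategy.
automl = AutoML(validation_strategy={"validation_type": "custom"})
automl.fit(X_train, y_train, cv=splits)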

Source code in supervised\automl.py
def fit(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Union[numpy.ndarray, pandas.Series],
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        cv: Optional[Union[Iterable, List]] = None,
        sensitive_features: Optional[
            Union[numpy.ndarray, pandas.Series, pandas.DataFrame]
        ] = None,
):
    """Fit the AutoML model.

    Arguments:
        X (numpy.ndarray or pandas.DataFrame): Training data

        y (numpy.ndarray or pandas.Series): Training targets

        sample_weight (numpy.ndarray or pandas.Series): Training sample weights

        cv (iterable or list): List or iterable with (train, validation) splits representing array of indices.
            It is used only with custom validation (`validation_strategy={'validation_type': 'custom'}`).

        sensitive_features (numpy.ndarray or pandas.Series or pandas.DataFrame):
            Sensitive features to learn fair models

    Returns:
        AutoML object: Returns `self`
    """
    return self._fit(X, y, sample_weight, cv, sensitive_features)

need_retrain(X, y, sample_weight=None, decrease=0.1)

Decides about model retraining based on new data.

Parameters:

Name Type Description Default
X numpy.ndarray or pandas.DataFrame

New data.

required
y numpy.ndarray or pandas.Series

True labels for X.

required
sample_weight numpy.ndarray or pandas.Series

Sample weights.

None
decrease float

The ratio of change in the performance used as a threshold for retraining decision. By default, it is set to 0.1 which means that if the performance of AutoML will decrease by 10% on new data then there is a need to retrain.

0.1
Returns:

boolean: Decides if there is a need to retrain the AutoML.
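
A small monitoring sketch, assuming automl is an already fitted instance and X_new, y_new are freshly collected, labelled data (placeholder names):

# Retrain when performance on the new data drops by more than 10%.
if automl.need_retrain(X_new, y_new, decrease=0.1):
    automl = AutoML()
    automl.fit(X_new, y_new)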
Source code in supervised\automl.py
def need_retrain(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Union[numpy.ndarray, pandas.Series],
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        decrease: float = 0.1,
) -> bool:
    """Decides about model retraining based on new data.

    Arguments:
        X (numpy.ndarray or pandas.DataFrame):
            New data.

        y (numpy.ndarray or pandas.Series):
            True labels for X.

        sample_weight (numpy.ndarray or pandas.Series):
            Sample weights.

        decrease (float): The ratio of change in the performance used as a threshold for retraining decision.
            By default, it is set to `0.1` which means that if the performance of AutoML will decrease by 10%
            on new data then there is a need to retrain.

    Returns:
        boolean: Decides if there is a need to retrain the AutoML.
    """
    return self._need_retrain(X, y, sample_weight, decrease)

predict(X)

Computes predictions from AutoML best model.

Parameters:

Name Type Description Default
X list or numpy.ndarray or pandas.DataFrame

Input values to make predictions on.

required

Returns:

Type Description
numpy.ndarray

  • One-dimensional array of class labels for classification.
  • One-dimensional array of predictions for regression.
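
A minimal usage sketch, assuming automl has been fitted and X_test is a DataFrame of unseen rows (placeholder name):

labels = automl.predict(X_test)
print(labels[:5])  # class labels for classification, numeric predictions for regression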
Source code in supervised\automl.py
def predict(self, X: Union[List, numpy.ndarray, pandas.DataFrame]) -> numpy.ndarray:
    """
    Computes predictions from AutoML best model.

    Arguments:
        X (list or numpy.ndarray or pandas.DataFrame):
            Input values to make predictions on.

    Returns:
        numpy.ndarray:

        - One-dimensional array of class labels for classification.
        - One-dimensional array of predictions for regression.
    """
    return self._predict(X)

predict_all(X)

Computes both class probabilities and class labels for classification tasks. Computes predictions for regression tasks.

Parameters:

X (list or numpy.ndarray or pandas.DataFrame): Input values to make predictions on. Required.

Returns:

pandas.DataFrame: DataFrame of shape (n_samples, n_classes + 1) containing both class probabilities and class labels of the input samples for classification tasks. DataFrame with predictions for regression tasks.

Raises:

AutoMLException: Model has not yet been fitted.
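Continuing the illustrative iris example from predict() above (assumes automl has already been fitted):

all_preds = automl.predict_all(X_test)
# For classification: one probability column per class plus a final label column;
# for regression: a single prediction column.
print(all_preds.head())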

Source code in supervised\automl.py
def predict_all(
        self, X: Union[List, numpy.ndarray, pandas.DataFrame]
) -> pandas.DataFrame:
    """
    Computes both class probabilities and class labels for classification tasks.
    Computes predictions for regression tasks.

    Arguments:
        X (list or numpy.ndarray or pandas.DataFrame):
            Input values to make predictions on.

    Returns:
        pandas.DataFrame:
            DataFrame (n_samples, n_classes + 1) containing both class probabilities and class
            labels of the input samples for classification tasks.
            DataFrame with predictions for regression tasks.

    Raises:
        AutoMLException: Model has not yet been fitted.

    """
    return self._predict_all(X)

predict_proba(X)

Computes class probabilities from AutoML best model. This method can only be used for classification tasks.

Parameters:

X (list or numpy.ndarray or pandas.DataFrame): Input values to make predictions on. Required.

Returns:

numpy.ndarray of shape (n_samples, n_classes): Matrix containing class probabilities of the input samples.

Raises:

AutoMLException: Model has not yet been fitted.
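Continuing the same illustrative classification example (assumes automl has already been fitted):

proba = automl.predict_proba(X_test)
print(proba.shape)  # (n_samples, n_classes)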

Source code in supervised\automl.py
def predict_proba(
        self, X: Union[List, numpy.ndarray, pandas.DataFrame]
) -> numpy.ndarray:
    """
    Computes class probabilities from AutoML best model.
    This method can only be used for classification tasks.

    Arguments:
        X (list or numpy.ndarray or pandas.DataFrame):
            Input values to make predictions on.

    Returns:
        numpy.ndarray of shape (n_samples, n_classes):
            Matrix containing class probabilities of the input samples

    Raises:
        AutoMLException: Model has not yet been fitted.

    """
    return self._predict_proba(X)

score(X, y=None, sample_weight=None)

Calculates a goodness of fit for an AutoML instance.

Parameters:

X (numpy.ndarray or pandas.DataFrame): Test values to make predictions on. Required.

y (numpy.ndarray or pandas.Series): True labels for X. Default: None.

sample_weight (numpy.ndarray or pandas.Series): Sample weights. Default: None.

Returns:

float: Returns a goodness of fit measure (higher is better):

- For classification tasks: returns the mean accuracy on the given test data and labels.
- For regression tasks: returns the R^2 (coefficient of determination) on the given test data and labels.
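Continuing the same illustrative example (assumes automl has already been fitted and X_test, y_test are held-out data):

print(automl.score(X_test, y_test))  # mean accuracy here; R^2 for regression tasks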
Source code in supervised\automl.py
def score(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
) -> float:
    """Calculates a goodness of `fit` for an AutoML instance.

    Arguments:
        X (numpy.ndarray or pandas.DataFrame):
            Test values to make predictions on.

        y (numpy.ndarray or pandas.Series):
            True labels for X.

        sample_weight (numpy.ndarray or pandas.Series):
            Sample weights.
    Returns:
        float: Returns a goodness of fit measure (higher is better):

        - For classification tasks: returns the mean accuracy on the given test data and labels.
        - For regression tasks: returns the R^2 (coefficient of determination) on the given test data and labels.
    """
    return self._score(X, y, sample_weight)

ModelFramework class

Source code in supervised\model_framework.py
class ModelFramework:
    def __init__(self, params, callbacks=[]):
        logger.debug("ModelFramework.__init__")
        self.uid = str(uuid.uuid4())

        for i in ["learner", "validation_strategy"]:  # mandatory parameters
            if i not in params:
                msg = "Missing {0} parameter in ModelFramework params".format(i)
                logger.error(msg)
                raise ValueError(msg)

        self.params = params
        self.callbacks = CallbackList(callbacks)

        self._name = params.get("name", "model")
        self.additional_params = params.get("additional")
        self.preprocessing_params = params.get("preprocessing")
        self.validation_params = params.get("validation_strategy")
        self.learner_params = params.get("learner")

        self._ml_task = params.get("ml_task")
        self._explain_level = params.get("explain_level")
        self._is_stacked = params.get("is_stacked", False)

        self.validation = None
        self.preprocessings = []
        self.learners = []

        self.train_time = None
        self.final_loss = None
        self.metric_name = None
        self.oof_predictions = None
        self._additional_metrics = None
        self._threshold = None  # used only for binary classifiers
        self._max_time_for_learner = params.get("max_time_for_learner", 3600)
        self._oof_predictions_fname = None
        self._single_prediction_time = None  # prediction time on single sample
        self._optuna_time_budget = params.get("optuna_time_budget")
        self._optuna_init_params = params.get("optuna_init_params", {})
        self._optuna_verbose = params.get("optuna_verbose", True)

        self._fairness_metric = params.get("fairness_metric")
        self._fairness_threshold = params.get("fairness_threshold")
        self._privileged_groups = params.get("privileged_groups", [])
        self._underprivileged_groups = params.get("underprivileged_groups", [])
        self._fairness_optimization = params.get("fairness_optimization")
        self._is_fair = None

        # the automl random state from AutoML constructor, used in Optuna optimizer
        self._automl_random_state = params.get("automl_random_state", 42)

    def get_train_time(self):
        return self.train_time

    def predictions(
        self,
        learner,
        preproces,
        X_train,
        y_train,
        sample_weight,
        sensitive_features,
        X_validation,
        y_validation,
        sample_weight_validation,
        sensitive_features_validation,
    ):
        y_train_true = y_train
        y_train_predicted = learner.predict(X_train)
        y_validation_true = y_validation
        y_validation_predicted = learner.predict(X_validation)

        y_train_true = preproces.inverse_scale_target(y_train_true)
        y_train_predicted = preproces.inverse_scale_target(y_train_predicted)
        y_validation_true = preproces.inverse_scale_target(y_validation_true)
        y_validation_predicted = preproces.inverse_scale_target(y_validation_predicted)

        y_validation_columns = []
        if self._ml_task == MULTICLASS_CLASSIFICATION:
            # y_train_true = preproces.inverse_categorical_target(y_train_true)
            # y_validation_true = preproces.inverse_categorical_target(y_validation_true)
            # get columns, omit the last one (it is label)
            y_validation_columns = preproces.prepare_target_labels(
                y_validation_predicted
            ).columns.tolist()[:-1]
        elif self._ml_task == BINARY_CLASSIFICATION:
            class_names = self.preprocessings[-1].get_target_class_names()
            y_validation_columns = "prediction"
            if not ("0" in class_names and "1" in class_names):
                y_validation_columns = (
                    f"prediction_0_for_{class_names[0]}_1_for_{class_names[1]}"
                )
        else:
            y_validation_columns = "prediction"

        return {
            "y_train_true": y_train_true,
            "y_train_predicted": y_train_predicted,
            "sample_weight": sample_weight,
            "sensitive_features": sensitive_features,
            "y_validation_true": y_validation_true,
            "y_validation_predicted": y_validation_predicted,
            "sample_weight_validation": sample_weight_validation,
            "sensitive_features_validation": sensitive_features_validation,
            "validation_index": X_validation.index,
            "validation_columns": y_validation_columns,
        }

    def train(self, results_path, model_subpath):
        logger.debug(f"ModelFramework.train {self.learner_params.get('model_type')}")

        start_time = time.time()
        np.random.seed(self.learner_params["seed"])

        optuna_tuner = None
        if self._optuna_time_budget is not None and OptunaTuner.is_optimizable(
            self.learner_params.get("model_type", "")
        ):
            optuna_tuner = OptunaTuner(
                results_path,
                ml_task=self._ml_task,
                eval_metric=self.get_metric(),
                time_budget=self._optuna_time_budget,
                init_params=self._optuna_init_params,
                verbose=self._optuna_verbose,
                n_jobs=self.learner_params.get("n_jobs", -1),
                random_state=self._automl_random_state,
            )

        self.validation = ValidationStep(self.validation_params)

        repeats = self.validation.get_repeats()
        for repeat in range(repeats):
            for k_fold in range(self.validation.get_n_splits()):
                train_data, validation_data = self.validation.get_split(k_fold, repeat)
                logger.debug(
                    "Data split, train X:{} y:{}, validation X:{}, y:{}".format(
                        train_data["X"].shape,
                        train_data["y"].shape,
                        validation_data["X"].shape,
                        validation_data["y"].shape,
                    )
                )
                if "sample_weight" in train_data:
                    logger.debug("Sample weight available during the training.")

                # the preprocessing is done at every validation step
                self.preprocessings += [
                    Preprocessing(
                        self.preprocessing_params, self.get_name(), k_fold, repeat
                    )
                ]

                X_train, y_train, sample_weight = self.preprocessings[
                    -1
                ].fit_and_transform(
                    train_data["X"], train_data["y"], train_data.get("sample_weight")
                )
                (
                    X_validation,
                    y_validation,
                    sample_weight_validation,
                ) = self.preprocessings[-1].transform(
                    validation_data["X"],
                    validation_data["y"],
                    validation_data.get("sample_weight"),
                )

                # get sensitive features from data split
                sensitive_features = train_data.get("sensitive_features")
                sensitive_features_validation = validation_data.get(
                    "sensitive_features"
                )

                if optuna_tuner is not None:
                    optuna_start_time = time.time()
                    self.learner_params = optuna_tuner.optimize(
                        self.learner_params.get("model_type", ""),
                        self.params.get("data_type", ""),
                        X_train,
                        y_train,
                        sample_weight,
                        X_validation,
                        y_validation,
                        sample_weight_validation,
                        self.learner_params,
                    )
                    # exclude optuna optimize time from model training
                    start_time += time.time() - optuna_start_time

                self.learner_params["explain_level"] = self._explain_level
                self.learners += [
                    AlgorithmFactory.get_algorithm(copy.deepcopy(self.learner_params))
                ]
                learner = self.learners[-1]
                learner.set_learner_name(k_fold, repeat, repeats)

                self.callbacks.add_and_set_learner(learner)
                self.callbacks.on_learner_train_start()

                log_to_file = os.path.join(
                    results_path, model_subpath, f"{learner.name}_training.log"
                )

                for i in range(learner.max_iters):
                    self.callbacks.on_iteration_start()

                    learner.fit(
                        X_train,
                        y_train,
                        sample_weight,
                        X_validation,
                        y_validation,
                        sample_weight_validation,
                        log_to_file,
                        self._max_time_for_learner,
                    )

                    if self.params.get("injected_sample_weight", False):
                        # print("Dont use sample weight in model evaluation")
                        sample_weight = None
                        sample_weight_validation = None

                    self.callbacks.on_iteration_end(
                        {"iter_cnt": i},
                        self.predictions(
                            learner,
                            self.preprocessings[-1],
                            X_train,
                            y_train,
                            sample_weight,
                            sensitive_features,
                            X_validation,
                            y_validation,
                            sample_weight_validation,
                            sensitive_features_validation,
                        ),
                    )

                    if learner.stop_training:
                        break
                    learner.update({"step": i})

                # end of learner iters loop
                self.callbacks.on_learner_train_end()

                model_path = os.path.join(results_path, model_subpath)
                learner.interpret(
                    X_train,
                    y_train,
                    X_validation,
                    y_validation,
                    model_file_path=model_path,
                    learner_name=learner.name,
                    class_names=self.preprocessings[-1].get_target_class_names(),
                    metric_name=self.get_metric_name(),
                    ml_task=self._ml_task,
                    explain_level=self._explain_level,
                )

                # save learner and free the memory
                p = os.path.join(model_path, learner.get_fname())
                learner.save(p)
                del learner.model
                learner.model = None
                # end of learner training

                # clear data
                del X_train
                del y_train
                del X_validation
                del y_validation

                if sample_weight is not None:
                    del sample_weight
                    del train_data["sample_weight"]
                if sample_weight_validation is not None:
                    del sample_weight_validation
                    del validation_data["sample_weight"]

                del train_data["X"]
                del train_data["y"]
                del validation_data["X"]
                del validation_data["y"]
                del train_data
                del validation_data

                gc.collect()

        # end of validation loop
        self.callbacks.on_framework_train_end()
        # self.get_additional_metrics()
        self._additional_metrics = self.get_additional_metrics()

        self.train_time = time.time() - start_time
        logger.debug("ModelFramework end of training")

    def release_learners(self):
        for learner in self.learners:
            if learner.model is not None:
                del learner.model
                learner.model = None

    def get_metric_name(self):
        if self.metric_name is not None:
            return self.metric_name
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.metric_name = early_stopping.metric.name
        return early_stopping.metric.name

    def get_metric(self):
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping:
            return early_stopping.metric
        return Metric({"name": self.get_metric_name()})

    def get_out_of_folds(self):
        if self.oof_predictions is not None:
            return self.oof_predictions.copy(deep=True)

        if self._oof_predictions_fname is not None:
            self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
            return self.oof_predictions.copy(deep=True)

        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.oof_predictions = early_stopping.best_y_oof

        ###############################################################
        # in case of one-hot coded multiclass target
        target_cols = [
            c for c in self.oof_predictions.columns.tolist() if "target" in c
        ]
        if len(target_cols) > 1:
            target = self.oof_predictions[target_cols[0]].copy()
            target.name = "target"
            for i, t in enumerate(target_cols):
                target[self.oof_predictions[t] == 1] = i
            self.oof_predictions.drop(target_cols, axis=1, inplace=True)

            self.oof_predictions.insert(0, "target", np.array(target))

        return early_stopping.best_y_oof

    def get_final_loss(self):
        if self.final_loss is not None:
            return self.final_loss
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.final_loss = early_stopping.final_loss
        return early_stopping.final_loss

    """
    def get_metric_logs(self):
        metric_logger = self.callbacks.get("metric_logger")
        if metric_logger is None:
            return None
        return metric_logger.loss_values
    """

    def get_type(self):
        return self.learner_params.get("model_type")

    def get_name(self):
        return self._name

    def involved_model_names(self):
        """Returns the list of all models involved in the current model.
        For single model, it returns the list with the name of the model.
        For ensemble model, it returns the list with the name of the ensemble and all internal models
        (used to build ensemble).
        For single model but trained on stacked data, it returns the list with the name of the model
        (names of models used in stacking are not included)."""
        return [self._name]

    def is_valid(self):
        """is_valid is used in Ensemble to check if it has more than 1 model in it.
        If Ensemble has only 1 model in it, then Ensemble shouldn't be used as best model
        """
        return True

    def is_fast_enough(self, max_single_prediction_time):
        # don't need to check
        if max_single_prediction_time is None:
            return True

        # no information about prediction time
        if self._single_prediction_time is None:
            return True

        return self._single_prediction_time < max_single_prediction_time

    def predict(self, X):
        logger.debug("ModelFramework.predict")

        if self.learners is None or len(self.learners) == 0:
            raise Exception("Learnes are not initialized")
        # run predict on all learners and return the average
        y_predicted = None  # np.zeros((X.shape[0],))
        for ind, learner in enumerate(self.learners):
            # preprocessing goes here
            X_data, _, _ = self.preprocessings[ind].transform(X.copy(), None)
            y_p = learner.predict(X_data)
            y_p = self.preprocessings[ind].inverse_scale_target(y_p)

            y_predicted = y_p if y_predicted is None else y_predicted + y_p

        y_predicted_average = y_predicted / float(len(self.learners))

        y_predicted_final = self.preprocessings[0].prepare_target_labels(
            y_predicted_average
        )

        return y_predicted_final

    def get_additional_metrics(self):
        if self._additional_metrics is None:
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]

            target = oof_predictions[target_cols]

            oof_preds = None
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                oof_preds = self.preprocessings[0].prepare_target_labels(
                    oof_predictions[prediction_cols].values
                )
            else:
                oof_preds = oof_predictions[prediction_cols]

            sample_weight = None
            if "sample_weight" in oof_predictions.columns:
                sample_weight = oof_predictions["sample_weight"]

            sensitive_features = None
            sensitive_cols = [c for c in oof_predictions.columns if "sensitive" in c]
            if sensitive_cols:
                sensitive_features = oof_predictions[sensitive_cols]

            self._additional_metrics = AdditionalMetrics.compute(
                target,
                oof_preds,
                sample_weight,
                self._ml_task,
                sensitive_features,
                self._fairness_metric
                if self._ml_task != REGRESSION
                else f"{self._fairness_metric}@{self.get_metric_name()}",
                self._fairness_threshold,
                self._privileged_groups,
                self._underprivileged_groups,
                self._fairness_optimization,
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])
        return self._additional_metrics

    def get_sensitive_features_names(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return [i for i in list(fm.keys()) if i != "fairness_optimization"]

    def get_fairness_metric(self, col_name):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get(col_name, {}).get("fairness_metric_value")

    def get_fairness_optimization(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get("fairness_optimization", {})

    def get_worst_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The worst fairness metric is:
        # - for ratio metrics, the lowest fairness value from all sensitive features
        # - for difference metrics, the highest fairness value from all sensitive features
        # It is needed as bias mitigation stop criteria.

        metrics = self.get_additional_metrics()

        fm = metrics.get("fairness_metrics", {})
        worst_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 0)
                else:
                    worst_value = min(
                        worst_value, values.get("fairness_metric_value", 0)
                    )
            else:
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 1)
                else:
                    worst_value = max(
                        worst_value, values.get("fairness_metric_value", 1)
                    )

        return worst_value

    def get_best_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The best fairness metric is:
        # - for ratio metrics, the highest fairness value from all sensitive features
        # - for difference metrics, the lowest fairness value from all sensitive features
        # It is needed as bias mitigation stop criteria.

        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        best_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 0)
                else:
                    best_value = max(best_value, values.get("fairness_metric_value", 0))
            else:
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 1)
                else:
                    best_value = min(best_value, values.get("fairness_metric_value", 1))

        return best_value

    def is_fair(self):
        if self._is_fair is not None:
            return self._is_fair
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        for col, m in fm.items():
            if col == "fairness_optimization":
                continue
            if not m.get("is_fair", True):
                self._is_fair = False
                return False
        self._is_fair = True
        return True

    def save(self, results_path, model_subpath):
        start_time = time.time()
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Save the model {model_path}")

        type_of_predictions = (
            "validation" if "k_folds" not in self.validation_params else "out_of_folds"
        )
        predictions_fname = os.path.join(
            model_subpath, f"predictions_{type_of_predictions}.csv"
        )
        self._oof_predictions_fname = os.path.join(results_path, predictions_fname)
        predictions = self.get_out_of_folds()
        predictions.to_csv(self._oof_predictions_fname, index=False)

        saved = [os.path.join(model_subpath, l.get_fname()) for l in self.learners]

        with open(os.path.join(model_path, "framework.json"), "w") as fout:
            preprocessing = [p.to_json() for p in self.preprocessings]
            learners_params = [learner.get_params() for learner in self.learners]

            desc = {
                "uid": self.uid,
                "name": self._name,
                "preprocessing": preprocessing,
                "learners": learners_params,
                "params": self.params,
                "saved": saved,
                "predictions_fname": predictions_fname,
                "metric_name": self.get_metric_name(),
                "final_loss": self.get_final_loss(),
                "train_time": self.get_train_time(),
                "is_stacked": self._is_stacked,
                "joblib_version": joblib.__version__,
            }
            desc["final_loss"] = str(desc["final_loss"])
            if self._threshold is not None:
                desc["threshold"] = self._threshold
            if self._single_prediction_time is not None:
                desc["single_prediction_time"] = self._single_prediction_time
            fout.write(json.dumps(desc, indent=4))

        learning_curve_metric = self.learners[0].get_metric_name()
        if learning_curve_metric is None:
            learning_curve_metric = self.get_metric_name()

        LearningCurves.plot(
            [l.name for l in self.learners],
            learning_curve_metric,
            model_path,
            trees_in_iteration=self.additional_params.get("trees_in_step"),
        )

        # call additional metrics just to be sure they are computed
        self._additional_metrics = self.get_additional_metrics()

        AdditionalMetrics.save(
            self._additional_metrics, self._ml_task, self.model_markdown(), model_path
        )

        with open(os.path.join(model_path, "status.txt"), "w") as fout:
            fout.write("ALL OK!")

        # Adding save time to total train time
        self.train_time += time.time() - start_time

    def model_markdown(self):
        long_name = AlgorithmsRegistry.get_long_name(
            self._ml_task, self.learner_params["model_type"]
        )
        short_name = self.learner_params["model_type"]
        desc = f"# Summary of {self.get_name()}\n\n"

        desc += "[<< Go back](../README.md)\n\n"

        if long_name == short_name:
            desc += f"\n## {short_name}\n"
        else:
            desc += f"\n## {long_name} ({short_name})\n"
        for k, v in self.learner_params.items():
            if k in ["model_type", "ml_task", "seed"]:
                continue
            desc += f"- **{k}**: {v}\n"
        desc += "\n## Validation\n"
        for k, v in self.validation_params.items():
            if "path" not in k:
                desc += f" - **{k}**: {v}\n"
        desc += "\n## Optimized metric\n"
        desc += f"{self.get_metric_name()}\n"
        desc += "\n## Training time\n"
        desc += f"\n{np.round(self.train_time,1)} seconds\n"
        return desc

    @staticmethod
    def load(results_path, model_subpath, lazy_load=True):
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Loading model framework from {model_path}")

        json_desc = json.load(open(os.path.join(model_path, "framework.json")))

        joblib_version_computer = joblib.__version__
        joblib_version_framework = json_desc.get("joblib_version")

        if (
            joblib_version_framework is not None
            and joblib_version_computer != joblib_version_framework
        ):
            raise AutoMLException(
                f"Joblib version mismatch. Computer: {joblib_version_computer}, Framework: {joblib_version_framework}. Change to Framework version!"
            )

        mf = ModelFramework(json_desc["params"])
        mf.uid = json_desc.get("uid", mf.uid)
        mf._name = json_desc.get("name", mf._name)
        mf._threshold = json_desc.get("threshold")
        mf.train_time = json_desc.get("train_time", mf.train_time)
        mf.final_loss = json_desc.get("final_loss", mf.final_loss)
        mf.metric_name = json_desc.get("metric_name", mf.metric_name)
        mf._is_stacked = json_desc.get("is_stacked", mf._is_stacked)
        mf._single_prediction_time = json_desc.get(
            "single_prediction_time", mf._single_prediction_time
        )
        predictions_fname = json_desc.get("predictions_fname")
        if predictions_fname is not None:
            mf._oof_predictions_fname = os.path.join(results_path, predictions_fname)

        mf.learners = []
        for learner_desc, learner_subpath in zip(
            json_desc.get("learners"), json_desc.get("saved")
        ):
            learner_path = os.path.join(results_path, learner_subpath)
            l = AlgorithmFactory.load(learner_desc, learner_path, lazy_load)
            mf.learners += [l]

        mf.preprocessings = []
        for p in json_desc.get("preprocessing"):
            ps = Preprocessing()
            ps.from_json(p, results_path)
            mf.preprocessings += [ps]

        return mf

involved_model_names()

Returns the list of all models involved in the current model. For a single model, it returns a list containing only that model's name. For an ensemble model, it returns a list containing the ensemble's name and the names of all internal models used to build it. For a single model trained on stacked data, it returns a list with only that model's name (the names of models used in stacking are not included).

Source code in supervised\model_framework.py
def involved_model_names(self):
    """Returns the list of all models involved in the current model.
    For single model, it returns the list with the name of the model.
    For ensemble model, it returns the list with the name of the ensemble and all internal models
    (used to build ensemble).
    For single model but trained on stacked data, it returns the list with the name of the model
    (names of models used in stacking are not included)."""
    return [self._name]

is_valid()

is_valid is used in Ensemble to check whether it contains more than one model. If an Ensemble contains only one model, it shouldn't be used as the best model.

Source code in supervised\model_framework.py
def is_valid(self):
    """is_valid is used in Ensemble to check if it has more than 1 model in it.
    If Ensemble has only 1 model in it, then Ensemble shouldn't be used as best model
    """
    return True

Ensemble class

Stack models to build a level-2 ensemble.
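The greedy selection idea behind this class can be sketched roughly as follows; this is a simplified, standalone illustration with a hypothetical helper (greedy_blend), not the library's implementation; see the actual fit() in the source below.

import numpy as np

def greedy_blend(oof_preds: dict, y: np.ndarray, loss, n_rounds: int = 10):
    """Hypothetical sketch: oof_preds maps model name -> 1-D array of out-of-fold predictions."""
    chosen, blend = [], None
    for _ in range(n_rounds):
        best_name, best_loss = None, np.inf
        # try to add the model whose predictions improve the blended loss the most
        for name, preds in oof_preds.items():
            k = len(chosen)
            candidate = preds if blend is None else (blend * k + preds) / (k + 1)
            current = loss(y, candidate)
            if current < best_loss:
                best_name, best_loss = name, current
        chosen.append(best_name)  # models may be selected repeatedly, acting as weights
        preds = oof_preds[best_name]
        k = len(chosen)
        blend = preds if blend is None else (blend * (k - 1) + preds) / k
    return chosen, blend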

Source code in supervised\ensemble.py
class Ensemble:
    """
    stack models to build level 2 ensemble.
    """

    algorithm_name = "Greedy Ensemble"
    algorithm_short_name = "Ensemble"

    def __init__(
        self,
        optimize_metric="logloss",
        ml_task=BINARY_CLASSIFICATION,
        is_stacked=False,
        max_single_prediction_time=None,
        fairness_metric=None,
        fairness_threshold=None,
        privileged_groups=None,
        underprivileged_groups=None,
    ):
        self.library_version = "0.1"
        self.uid = str(uuid.uuid4())

        self.metric = Metric({"name": optimize_metric})
        self.best_loss = self.metric.get_maximum()  # the best loss obtained by ensemble
        self.models_map = None
        self.selected_models = []
        self.train_time = None
        self.total_best_sum = None  # total sum of predictions, the oof of ensemble
        self.target = None
        self.target_columns = None
        self.sample_weight = None
        self._ml_task = ml_task
        self._optimize_metric = optimize_metric
        self._is_stacked = is_stacked

        self._additional_metrics = None
        self._threshold = None
        self._name = "Ensemble_Stacked" if is_stacked else "Ensemble"
        self._scores = []
        self.oof_predictions = None
        self._oof_predictions_fname = None
        self._single_prediction_time = None  # prediction time on single sample
        self._max_single_prediction_time = max_single_prediction_time
        self.model_prediction_time = {}

        self._fairness_metric = fairness_metric
        self._fairness_threshold = fairness_threshold
        self._privileged_groups = privileged_groups
        self._underprivileged_groups = underprivileged_groups
        self._is_fair = None
        self.sensitive_features = None

    def get_train_time(self):
        return self.train_time

    def get_final_loss(self):
        return self.best_loss

    def is_valid(self):
        return len(self.selected_models) > 1

    def is_fast_enough(self, max_single_prediction_time):
        # don't need to check
        if max_single_prediction_time is None:
            return True

        # no information about prediction time
        if self._single_prediction_time is None:
            return True

        return self._single_prediction_time < max_single_prediction_time

    def get_type(self):
        prefix = ""  # "Stacked" if self._is_stacked else ""
        return prefix + self.algorithm_short_name

    def get_name(self):
        return self._name

    def involved_model_names(self):
        """Returns the list of all models involved in the current model.
        For single model, it returns the list with the name of the model.
        For ensemble model, it returns the list with the name of the ensemble and all internal models
        (used to build ensemble).
        For single model but trained on stacked data, it returns the list with the name of the model
        (names of models used in stacking are not included)."""
        if self.selected_models is None or not self.selected_models:
            return [self._name]
        l = []
        for m in self.selected_models:
            l += m["model"].involved_model_names()
        return [self._name] + l

    def get_metric_name(self):
        return self.metric.name

    def get_metric(self):
        return self.metric

    def get_out_of_folds(self):
        """Needed when ensemble is treated as model and we want to compute additional metrics for it"""
        # single prediction (in case of binary classification and regression)
        if self.oof_predictions is not None:
            return self.oof_predictions.copy(deep=True)

        if self._oof_predictions_fname is not None:
            self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
            return self.oof_predictions.copy(deep=True)

        ensemble_oof = pd.DataFrame(
            data=self.total_best_sum, columns=self.total_best_sum.columns
        )
        ensemble_oof["target"] = self.target
        if self.sample_weight is not None:
            ensemble_oof["sample_weight"] = self.sample_weight

        # if self.sensitive_features is not None:
        #    for col in self.sensitive_features.columns:
        #        ensemble_oof[col] = self.sensitive_features[col]

        self.oof_predictions = ensemble_oof
        return ensemble_oof

    def _get_mean(self, oof_selected, best_sum, best_count):
        resp = copy.deepcopy(oof_selected)
        if best_count > 1:
            resp += best_sum
            resp /= float(best_count)
        return resp

    def get_oof_matrix(self, models):
        # remember models, will be needed in predictions
        self.models_map = {m.get_name(): m for m in models}

        if self._max_single_prediction_time is not None:
            self.model_prediction_time = {
                m.get_name(): m._single_prediction_time for m in models
            }

            if not [
                m for m in models if m.is_fast_enough(self._max_single_prediction_time)
            ]:
                raise NotTrainedException(
                    "Can't contruct ensemble with prediction time smaller than limit."
                )

        # check if we can construct fair ensemble
        if self._fairness_metric is not None:
            if not [m for m in models if m.is_fair()]:
                raise NotTrainedException("Can't contruct fair ensemble.")

        oofs = {}
        sensitive_features = None
        for m in models:
            # do not use model with RandomFeature
            if "RandomFeature" in m.get_name():
                continue

            # ensemble only the same level of stack
            # if m._is_stacked != self._is_stacked:
            #    continue
            oof = m.get_out_of_folds()
            prediction_cols = [c for c in oof.columns if "prediction" in c]
            oofs[m.get_name()] = oof[prediction_cols]  # oof["prediction"]
            if self.target is None:
                self.target_columns = [c for c in oof.columns if "target" in c]
                self.target = oof[
                    self.target_columns
                ]  # it will be needed for computing advance model statistics

            if self.sample_weight is None and "sample_weight" in oof.columns:
                self.sample_weight = oof["sample_weight"]

            sensitive_cols = [c for c in oof.columns if "sensitive" in c]
            if sensitive_cols and sensitive_features is None:
                sensitive_features = oof[sensitive_cols]

        return oofs, self.target, self.sample_weight, sensitive_features

    def get_additional_metrics(self):
        if self._additional_metrics is None:
            logger.debug("Get additional metrics for Ensemble")
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]

            oof_preds = oof_predictions[prediction_cols]
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                cols = oof_preds.columns.tolist()
                # prediction_
                labels = {i: v[11:] for i, v in enumerate(cols)}

                oof_preds.loc[:, "label"] = np.argmax(
                    np.array(oof_preds[prediction_cols]), axis=1
                )
                oof_preds.loc[:, "label"] = oof_preds["label"].map(labels)

            sample_weight = None
            if "sample_weight" in oof_predictions.columns:
                sample_weight = oof_predictions["sample_weight"]

            self._additional_metrics = AdditionalMetrics.compute(
                oof_predictions[target_cols],
                oof_preds,
                sample_weight,
                self._ml_task,
                self.sensitive_features,
                self._fairness_metric
                if self._ml_task != REGRESSION
                else f"{self._fairness_metric}@{self.get_metric_name()}",
                self._fairness_threshold,
                self._privileged_groups,
                self._underprivileged_groups,
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])

        return self._additional_metrics

    def get_sensitive_features_names(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return [i for i in list(fm.keys()) if i != "fairness_optimization"]

    def get_fairness_metric(self, col_name):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get(col_name, {}).get("fairness_metric_value")

    def get_fairness_optimization(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get("fairness_optimization", {})

    def get_worst_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The worst fairness metric is:
        # - for ratio metrics, the lowest fairness value from all sensitive features
        # - for difference metrics, the highest fairness value from all sensitive features
        # It is needed as bias mitigation stop criteria.

        metrics = self.get_additional_metrics()

        fm = metrics.get("fairness_metrics", {})
        worst_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 0)
                else:
                    worst_value = min(
                        worst_value, values.get("fairness_metric_value", 0)
                    )
            else:
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 1)
                else:
                    worst_value = max(
                        worst_value, values.get("fairness_metric_value", 1)
                    )

        return worst_value

    def get_best_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The best fairness metric is:
        # - for ratio metrics, the highest fairness value from all sensitive features
        # - for difference metrics, the lowest fairness value from all sensitive features
        # It is needed as bias mitigation stop criteria.

        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        best_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 0)
                else:
                    best_value = max(best_value, values.get("fairness_metric_value", 0))
            else:
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 1)
                else:
                    best_value = min(best_value, values.get("fairness_metric_value", 1))

        return best_value

    def is_fair(self):
        if self._is_fair is not None:
            return self._is_fair
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        for col, m in fm.items():
            if col == "fairness_optimization":
                continue
            if not m.get("is_fair", True):
                self._is_fair = False
                return False
        self._is_fair = True
        return True

    def fit(self, oofs, y, sample_weight=None, sensitive_features=None):
        logger.debug("Ensemble.fit")
        self.sensitive_features = sensitive_features
        start_time = time.time()
        selected_algs_cnt = 0  # number of selected algorithms
        self.best_algs = []  # selected algorithm indices from each loop

        total_prediction_time = 0
        best_sum = None  # sum of best algorithms
        for j in range(len(oofs)):  # iterate over all solutions
            min_score = self.metric.get_maximum()
            best_model = None
            # try to add some algorithm to the best_sum to minimize metric
            for model_name in oofs.keys():
                if (
                    self._max_single_prediction_time
                    and model_name in self.model_prediction_time
                ):
                    if (
                        total_prediction_time + self.model_prediction_time[model_name]
                        > self._max_single_prediction_time
                    ):
                        continue
                # skip unfair models
                if (
                    self._fairness_metric is not None
                    and not self.models_map[model_name].is_fair()
                ):
                    continue
                y_ens = self._get_mean(oofs[model_name], best_sum, j + 1)
                score = self.metric(y, y_ens, sample_weight)
                if self.metric.improvement(previous=min_score, current=score):
                    min_score = score
                    best_model = model_name

            if best_model is None:
                continue
            # there is improvement, save it
            # save scores for plotting learning curve
            # if we optimize negative, then we need to multiply by -1.0
            # to save correct values in the learning curve
            sign = -1.0 if Metric.optimize_negative(self.metric.name) else 1.0
            self._scores += [sign * min_score]

            if self.metric.improvement(previous=self.best_loss, current=min_score):
                self.best_loss = min_score
                selected_algs_cnt = j

            self.best_algs.append(best_model)  # save the best algorithm
            # update best_sum value
            best_sum = (
                oofs[best_model] if best_sum is None else best_sum + oofs[best_model]
            )
            if j == selected_algs_cnt:
                self.total_best_sum = copy.deepcopy(best_sum)

            # update prediction time estimate
            if self._max_single_prediction_time is not None:
                total_prediction_time = np.sum(
                    [
                        self.model_prediction_time[name]
                        for name in np.unique(self.best_algs)
                    ]
                )
        # end of main loop #

        if not self.best_algs:
            raise NotTrainedException("Ensemble wasn't fitted.")

        # keep oof predictions of ensemble
        self.total_best_sum /= float(selected_algs_cnt + 1)
        self.best_algs = self.best_algs[: (selected_algs_cnt + 1)]

        logger.debug("Selected models for ensemble:")
        for model_name in np.unique(self.best_algs):
            self.selected_models += [
                {
                    "model": self.models_map[model_name],
                    "repeat": float(self.best_algs.count(model_name)),
                }
            ]
            logger.debug(f"{model_name} {self.best_algs.count(model_name)}")

        self._additional_metrics = self.get_additional_metrics()

        self.train_time = time.time() - start_time

    def predict(self, X, X_stacked=None):
        logger.debug(
            "Ensemble.predict with {} models".format(len(self.selected_models))
        )
        y_predicted_ensemble = None
        total_repeat = 0.0

        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            total_repeat += repeat

            if model._is_stacked:
                y_predicted_from_model = model.predict(X_stacked)
            else:
                y_predicted_from_model = model.predict(X)

            prediction_cols = []
            if self._ml_task in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]:
                prediction_cols = [
                    c for c in y_predicted_from_model.columns if "prediction_" in c
                ]
            else:  # REGRESSION
                prediction_cols = ["prediction"]
            y_predicted_from_model = y_predicted_from_model[prediction_cols]
            y_predicted_ensemble = (
                y_predicted_from_model * repeat
                if y_predicted_ensemble is None
                else y_predicted_ensemble + y_predicted_from_model * repeat
            )

        y_predicted_ensemble /= total_repeat

        if self._ml_task == MULTICLASS_CLASSIFICATION:
            cols = y_predicted_ensemble.columns.tolist()
            # prediction_
            labels = {i: v[11:] for i, v in enumerate(cols)}

            y_predicted_ensemble["label"] = np.argmax(
                np.array(y_predicted_ensemble[prediction_cols]), axis=1
            )
            y_predicted_ensemble["label"] = y_predicted_ensemble["label"].map(labels)

        return y_predicted_ensemble

    def to_json(self):
        models_json = []
        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            models_json += [{"model": model.to_json(), "repeat": repeat}]

        json_desc = {
            "library_version": self.library_version,
            "algorithm_name": self.algorithm_name,
            "algorithm_short_name": self.algorithm_short_name,
            "uid": self.uid,
            "models": models_json,
        }
        return json_desc

    def from_json(self, json_desc):
        self.library_version = json_desc.get("library_version", self.library_version)
        self.algorithm_name = json_desc.get("algorithm_name", self.algorithm_name)
        self.algorithm_short_name = json_desc.get(
            "algorithm_short_name", self.algorithm_short_name
        )
        self.uid = json_desc.get("uid", self.uid)
        self.selected_models = []
        models_json = json_desc.get("models")
        for selected in models_json:
            model = selected["model"]
            repeat = selected["repeat"]

            il = ModelFramework(model.get("params"))
            il.from_json(model)
            self.selected_models += [
                # {"model": LearnerFactory.load(model), "repeat": repeat}
                {"model": il, "repeat": repeat}
            ]

    def save(self, results_path, model_subpath):
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Save the ensemble to {model_path}")

        predictions = self.get_out_of_folds()
        predictions_fname = os.path.join(model_subpath, "predictions_ensemble.csv")
        self._oof_predictions_fname = os.path.join(results_path, predictions_fname)
        predictions.to_csv(self._oof_predictions_fname, index=False)

        with open(os.path.join(model_path, "ensemble.json"), "w") as fout:
            ms = []
            for selected in self.selected_models:
                ms += [{"model": selected["model"]._name, "repeat": selected["repeat"]}]

            desc = {
                "name": self._name,
                "ml_task": self._ml_task,
                "optimize_metric": self._optimize_metric,
                "selected_models": ms,
                "predictions_fname": predictions_fname,
                "metric_name": self.get_metric_name(),
                "final_loss": self.get_final_loss(),
                "train_time": self.get_train_time(),
                "is_stacked": self._is_stacked,
            }

            if self._threshold is not None:
                desc["threshold"] = self._threshold
            fout.write(json.dumps(desc, indent=4))

        LearningCurves.plot_for_ensemble(self._scores, self.metric.name, model_path)

        # call additional metrics just to be sure they are computed
        self._additional_metrics = self.get_additional_metrics()

        AdditionalMetrics.save(
            self._additional_metrics, self._ml_task, self.model_markdown(), model_path
        )

        with open(os.path.join(model_path, "status.txt"), "w") as fout:
            fout.write("ALL OK!")

    def model_markdown(self):
        select_models_desc = []
        for selected in self.selected_models:
            select_models_desc += [
                {"model": selected["model"]._name, "repeat": selected["repeat"]}
            ]
        desc = f"# Summary of {self.get_name()}\n\n"
        desc += "[<< Go back](../README.md)\n\n"
        desc += "\n## Ensemble structure\n"
        selected = pd.DataFrame(select_models_desc)
        desc += tabulate(selected.values, ["Model", "Weight"], tablefmt="pipe")
        desc += "\n"
        return desc

    @staticmethod
    def load(results_path, model_subpath, models_map):
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Loading ensemble from {model_path}")

        with open(os.path.join(model_path, "ensemble.json")) as fin:
            json_desc = json.load(fin)

        ensemble = Ensemble(json_desc.get("optimize_metric"), json_desc.get("ml_task"))
        ensemble._name = json_desc.get("name", ensemble._name)
        ensemble._threshold = json_desc.get("threshold", ensemble._threshold)
        for m in json_desc.get("selected_models", []):
            ensemble.selected_models += [
                {"model": models_map[m["model"]], "repeat": m["repeat"]}
            ]

        ensemble.best_loss = json_desc.get("final_loss", ensemble.best_loss)
        ensemble.train_time = json_desc.get("train_time", ensemble.train_time)
        ensemble._is_stacked = json_desc.get("is_stacked", ensemble._is_stacked)
        predictions_fname = json_desc.get("predictions_fname")
        if predictions_fname is not None:
            ensemble._oof_predictions_fname = os.path.join(
                results_path, predictions_fname
            )

        return ensemble
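
A minimal, self-contained sketch (not part of the library) of the repeat-weighted averaging that Ensemble.predict performs: each selected model contributes its prediction columns multiplied by its "repeat" weight, and the accumulated sum is divided by the total weight. The data frames and weights below are illustrative only.

import pandas as pd

# hypothetical per-model class probabilities for a two-class problem
model_a = pd.DataFrame({"prediction_0": [0.9, 0.2], "prediction_1": [0.1, 0.8]})
model_b = pd.DataFrame({"prediction_0": [0.7, 0.4], "prediction_1": [0.3, 0.6]})
selected = [{"pred": model_a, "repeat": 2.0}, {"pred": model_b, "repeat": 1.0}]

blended, total_repeat = None, 0.0
for s in selected:
    total_repeat += s["repeat"]
    contribution = s["pred"] * s["repeat"]
    blended = contribution if blended is None else blended + contribution
blended /= total_repeat  # weighted average, same shape as a single model's output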

get_out_of_folds()

Needed when the ensemble is treated as a model and we want to compute additional metrics for it.

Source code in supervised\ensemble.py
def get_out_of_folds(self):
    """Needed when ensemble is treated as model and we want to compute additional metrics for it"""
    # single prediction (in case of binary classification and regression)
    if self.oof_predictions is not None:
        return self.oof_predictions.copy(deep=True)

    if self._oof_predictions_fname is not None:
        self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
        return self.oof_predictions.copy(deep=True)

    ensemble_oof = pd.DataFrame(
        data=self.total_best_sum, columns=self.total_best_sum.columns
    )
    ensemble_oof["target"] = self.target
    if self.sample_weight is not None:
        ensemble_oof["sample_weight"] = self.sample_weight

    # if self.sensitive_features is not None:
    #    for col in self.sensitive_features.columns:
    #        ensemble_oof[col] = self.sensitive_features[col]

    self.oof_predictions = ensemble_oof
    return ensemble_oof
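
A hedged usage sketch (assumes a fitted ensemble object, e.g. one returned by Ensemble.load, with cached out-of-fold predictions): the returned frame contains the prediction columns plus a "target" column, which is enough to compute extra metrics. The exact column layout shown here (prediction_* probability columns) is an assumption for a classification run, not guaranteed by the snippet above.

from sklearn.metrics import log_loss

oof = ensemble.get_out_of_folds()  # DataFrame with prediction columns and "target"
prediction_cols = [c for c in oof.columns if c.startswith("prediction_")]
print("ensemble OOF logloss:", log_loss(oof["target"], oof[prediction_cols]))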

involved_model_names()

Returns the list of all models involved in the current model. For a single model, it returns a list containing only that model's name. For an ensemble, it returns a list with the ensemble's name followed by the names of all internal models used to build it. For a single model trained on stacked data, it returns a list with only that model's name (the names of the models used for stacking are not included).

Source code in supervised\ensemble.py
def involved_model_names(self):
    """Returns the list of all models involved in the current model.
    For single model, it returns the list with the name of the model.
    For ensemble model, it returns the list with the name of the ensemble and all internal models
    (used to build ensemble).
    For single model but trained on stacked data, it returns the list with the name of the model
    (names of models used in stacking are not included)."""
    if self.selected_models is None or not self.selected_models:
        return [self._name]
    l = []
    for m in self.selected_models:
        l += m["model"].involved_model_names()
    return [self._name] + l
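
An illustrative sketch (stub objects, not library classes) of how the recursion expands: a plain model returns just its own name, while an ensemble prepends its own name and then lists every member it was built from.

class _Stub:
    """Hypothetical stand-in exposing the same involved_model_names contract."""
    def __init__(self, name, selected_models=None):
        self._name = name
        self.selected_models = selected_models

    def involved_model_names(self):
        if not self.selected_models:
            return [self._name]
        names = [self._name]
        for m in self.selected_models:
            names += m["model"].involved_model_names()
        return names

single = _Stub("1_Default_Xgboost")
ensemble = _Stub("Ensemble", [{"model": single, "repeat": 2.0}])
print(ensemble.involved_model_names())  # ['Ensemble', '1_Default_Xgboost']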

Self-training

AutoST class

Automated Self-training for semi-supervised tasks.

Source code in semisupervised\autost.py
class AutoST:
    """
    Automated Self-training for semi-supervised tasks.
    """

    def __init__(self,
                 num_iterations,
                 entropy_filter_percentile,
                 lambda_uncertainty_final,
                 prob_threshold_final,
                 pseudo_label_ratio_final,
                 lambda_uncertainty_initial,
                 prob_threshold_initial,
                 pseudo_label_ratio_initial,
                 **kwargs):
        self.num_iterations = num_iterations
        self.entropy_filter_percentile = entropy_filter_percentile
        self.lambda_uncertainty_min = lambda_uncertainty_final
        self.prob_threshold_min = prob_threshold_final
        self.pseudo_label_ratio_min = pseudo_label_ratio_final
        self.lambda_uncertainty_max = lambda_uncertainty_initial
        self.prob_threshold_max = prob_threshold_initial
        self.pseudo_label_ratio_max = pseudo_label_ratio_initial
        self.kwargs = kwargs
        self.results_path = kwargs.get('results_path', '')

    def fit(self, X_labeled, y_labeled, X_unlabeled):
        # Create the results folder if it does not exist
        if not os.path.exists(self.results_path):
            os.makedirs(self.results_path)
            print(f"Results folder created: {self.results_path}")

        # Calculate the step size for each parameter
        lambda_uncertainty_step = (self.lambda_uncertainty_max - self.lambda_uncertainty_min) / self.num_iterations
        prob_threshold_step = (self.prob_threshold_max - self.prob_threshold_min) / self.num_iterations
        pseudo_label_ratio_step = (self.pseudo_label_ratio_max - self.pseudo_label_ratio_min) / self.num_iterations

        for i in range(self.num_iterations):
            print(f'****Self_training epoch {i} start****')

            # Update the parameters for this iteration
            # (values are interpolated from the *_final settings toward the *_initial settings as i grows)
            lambda_uncertainty = self.lambda_uncertainty_min + i * lambda_uncertainty_step
            prob_threshold = self.prob_threshold_min + i * prob_threshold_step
            pseudo_label_ratio = self.pseudo_label_ratio_min + i * pseudo_label_ratio_step

            # Update the results_path for this iteration
            self.kwargs['results_path'] = f"{self.results_path}/iteration_{i+1}"

            # Initialize the AutoML system with the updated results_path
            automl = AutoML(**self.kwargs)

            # Train the model on the labeled data
            automl.fit(X_labeled, y_labeled)

            # Get the prediction probabilities of the unlabeled data
            pred_prob = automl.predict_proba(X_unlabeled)

            # Compute the entropy of the prediction probabilities
            entropy_uncertainty = -np.sum(pred_prob * np.log(pred_prob + 1e-16), axis=1)

            # Compute the uncertainty-corrected prediction probabilities
            pred_prob_corrected = pred_prob - lambda_uncertainty * entropy_uncertainty[:, np.newaxis]

            # Compute the pseudo-labels based on the corrected prediction probabilities
            pseudo_labels = np.argmax(pred_prob_corrected, axis=1)

            # Compute the percentile of the entropy_uncertainty
            entropy_uncertainty_percentile = np.percentile(
                entropy_uncertainty, 100 - self.entropy_filter_percentile
            )

            # Keep only the samples whose entropy is below the (100 - entropy_filter_percentile)th
            # percentile, i.e. drop the most uncertain ones
            entropy_filter = (entropy_uncertainty < entropy_uncertainty_percentile)

            # Combine the original condition with the entropy filter
            selected_samples = (np.max(pred_prob_corrected, axis=1) > prob_threshold) & entropy_filter

            # Create a DataFrame from the unlabeled data and pseudo-labels
            data_unlabeled = pd.DataFrame(X_unlabeled[selected_samples])
            data_unlabeled['pseudo_label'] = pseudo_labels[selected_samples]

            # Downsample the data
            data_unlabeled_downsampled = data_unlabeled.sample(frac=pseudo_label_ratio, random_state=42)

            # Extract the downsampled data and labels
            X_unlabeled_selected = data_unlabeled_downsampled.drop(columns='pseudo_label')
            pseudo_labels_selected = data_unlabeled_downsampled['pseudo_label']

            # Combine the pseudo-labeled and labeled data
            X_labeled = pd.concat([X_labeled, X_unlabeled_selected])
            y_labeled = pd.concat([y_labeled, pd.Series(pseudo_labels_selected)])

            print(f'****Self_training epoch {i} finish****')
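
A hedged usage sketch of AutoST (all values are illustrative, not defaults): the eight scheduling parameters are passed by name, and any extra keyword arguments, including results_path, are forwarded to the underlying AutoML runs.

import numpy as np
import pandas as pd

# toy data: 100 labeled rows, 200 unlabeled rows, 5 numeric features
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.random((300, 5)), columns=[f"f{i}" for i in range(5)])
y = pd.Series(rng.integers(0, 2, 300))
X_labeled, y_labeled, X_unlabeled = X.iloc[:100], y.iloc[:100], X.iloc[100:]

autost = AutoST(
    num_iterations=3,
    entropy_filter_percentile=3,        # drop roughly the 3% most uncertain samples
    lambda_uncertainty_final=0.1,
    prob_threshold_final=0.7,
    pseudo_label_ratio_final=0.5,
    lambda_uncertainty_initial=0.3,
    prob_threshold_initial=0.9,
    pseudo_label_ratio_initial=0.2,
    results_path="AutoST_results",      # each iteration writes to results_path/iteration_<i>
    total_time_limit=60,                # forwarded to AutoML
)
autost.fit(X_labeled, y_labeled, X_unlabeled)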