Skip to contents

Exhaustive search generates all possible feature sets.

The feature selection terminates automatically once all feature sets have been evaluated, so it is not necessary to set a termination criterion.

Dictionary

This FSelector can be instantiated via the dictionary mlr_fselectors or with the associated sugar function fs():

mlr_fselectors$get("exhaustive_search")
fs("exhaustive_search")

Parameters

max_features

integer(1)
Maximum number of features to include in a feature set. By default, this is the number of features in the mlr3::Task.

Super class

mlr3fselect::FSelector -> FSelectorExhaustiveSearch

Methods

Inherited methods


Method new()

Creates a new instance of this R6 class.


Method clone()

The objects of this class are cloneable with this method.

Usage

FSelectorExhaustiveSearch$clone(deep = FALSE)

Arguments

deep

Whether to make a deep clone.

Examples

# retrieve task
task = tsk("pima")

# load learner
learner = lrn("classif.rpart")

# \donttest{
# feature selection on the Pima Indians diabetes data set
instance = fselect(
  method = "exhaustive_search",
  task = task,
  learner = learner,
  resampling = rsmp("holdout"),
  measure = msr("classif.ce"),
  term_evals = 10
)

# best performing feature subset
instance$result
#>      age glucose insulin  mass pedigree pregnant pressure triceps
#> 1: FALSE    TRUE   FALSE FALSE    FALSE     TRUE    FALSE   FALSE
#>            features classif.ce
#> 1: glucose,pregnant  0.2265625

# all evaluated feature subsets
as.data.table(instance$archive)
#>       age glucose insulin  mass pedigree pregnant pressure triceps classif.ce
#>  1:  TRUE   FALSE   FALSE FALSE    FALSE    FALSE    FALSE   FALSE  0.3515625
#>  2: FALSE    TRUE   FALSE FALSE    FALSE    FALSE    FALSE   FALSE  0.2382812
#>  3: FALSE   FALSE    TRUE FALSE    FALSE    FALSE    FALSE   FALSE  0.3476562
#>  4: FALSE   FALSE   FALSE  TRUE    FALSE    FALSE    FALSE   FALSE  0.3007812
#>  5: FALSE   FALSE   FALSE FALSE     TRUE    FALSE    FALSE   FALSE  0.3203125
#>  6: FALSE   FALSE   FALSE FALSE    FALSE     TRUE    FALSE   FALSE  0.3164062
#>  7: FALSE   FALSE   FALSE FALSE    FALSE    FALSE     TRUE   FALSE  0.3203125
#>  8: FALSE   FALSE   FALSE FALSE    FALSE    FALSE    FALSE    TRUE  0.3359375
#>  9:  TRUE    TRUE   FALSE FALSE    FALSE    FALSE    FALSE   FALSE  0.2500000
#> 10:  TRUE   FALSE    TRUE FALSE    FALSE    FALSE    FALSE   FALSE  0.3320312
#> 11:  TRUE   FALSE   FALSE  TRUE    FALSE    FALSE    FALSE   FALSE  0.3046875
#> 12:  TRUE   FALSE   FALSE FALSE     TRUE    FALSE    FALSE   FALSE  0.3125000
#> 13:  TRUE   FALSE   FALSE FALSE    FALSE     TRUE    FALSE   FALSE  0.3085938
#> 14:  TRUE   FALSE   FALSE FALSE    FALSE    FALSE     TRUE   FALSE  0.3281250
#> 15:  TRUE   FALSE   FALSE FALSE    FALSE    FALSE    FALSE    TRUE  0.3476562
#> 16: FALSE    TRUE    TRUE FALSE    FALSE    FALSE    FALSE   FALSE  0.2382812
#> 17: FALSE    TRUE   FALSE  TRUE    FALSE    FALSE    FALSE   FALSE  0.2617188
#> 18: FALSE    TRUE   FALSE FALSE     TRUE    FALSE    FALSE   FALSE  0.2500000
#> 19: FALSE    TRUE   FALSE FALSE    FALSE     TRUE    FALSE   FALSE  0.2265625
#> 20: FALSE    TRUE   FALSE FALSE    FALSE    FALSE     TRUE   FALSE  0.2382812
#> 21: FALSE    TRUE   FALSE FALSE    FALSE    FALSE    FALSE    TRUE  0.2539062
#> 22: FALSE   FALSE    TRUE  TRUE    FALSE    FALSE    FALSE   FALSE  0.3085938
#> 23: FALSE   FALSE    TRUE FALSE     TRUE    FALSE    FALSE   FALSE  0.3476562
#> 24: FALSE   FALSE    TRUE FALSE    FALSE     TRUE    FALSE   FALSE  0.3242188
#> 25: FALSE   FALSE    TRUE FALSE    FALSE    FALSE     TRUE   FALSE  0.3632812
#> 26: FALSE   FALSE    TRUE FALSE    FALSE    FALSE    FALSE    TRUE  0.3359375
#> 27: FALSE   FALSE   FALSE  TRUE     TRUE    FALSE    FALSE   FALSE  0.3242188
#> 28: FALSE   FALSE   FALSE  TRUE    FALSE     TRUE    FALSE   FALSE  0.2890625
#> 29: FALSE   FALSE   FALSE  TRUE    FALSE    FALSE     TRUE   FALSE  0.3554688
#> 30: FALSE   FALSE   FALSE  TRUE    FALSE    FALSE    FALSE    TRUE  0.3007812
#> 31: FALSE   FALSE   FALSE FALSE     TRUE     TRUE    FALSE   FALSE  0.3085938
#> 32: FALSE   FALSE   FALSE FALSE     TRUE    FALSE     TRUE   FALSE  0.3476562
#> 33: FALSE   FALSE   FALSE FALSE     TRUE    FALSE    FALSE    TRUE  0.3359375
#> 34: FALSE   FALSE   FALSE FALSE    FALSE     TRUE     TRUE   FALSE  0.3359375
#> 35: FALSE   FALSE   FALSE FALSE    FALSE     TRUE    FALSE    TRUE  0.3046875
#> 36: FALSE   FALSE   FALSE FALSE    FALSE    FALSE     TRUE    TRUE  0.3671875
#>       age glucose insulin  mass pedigree pregnant pressure triceps classif.ce
#>     runtime_learners           timestamp batch_nr      resample_result
#>  1:            0.073 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  2:            0.065 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  3:            0.060 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  4:            0.105 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  5:            0.068 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  6:            0.059 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  7:            0.074 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  8:            0.068 2022-08-25 10:40:28        1 <ResampleResult[21]>
#>  9:            0.068 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 10:            0.083 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 11:            0.064 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 12:            0.075 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 13:            0.069 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 14:            0.060 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 15:            0.080 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 16:            0.059 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 17:            0.062 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 18:            0.083 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 19:            0.061 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 20:            0.071 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 21:            0.070 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 22:            0.061 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 23:            0.079 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 24:            0.063 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 25:            0.063 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 26:            0.084 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 27:            0.061 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 28:            0.077 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 29:            0.069 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 30:            0.062 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 31:            0.116 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 32:            0.065 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 33:            0.062 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 34:            0.084 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 35:            0.061 2022-08-25 10:40:33        2 <ResampleResult[21]>
#> 36:            0.062 2022-08-25 10:40:33        2 <ResampleResult[21]>
#>     runtime_learners           timestamp batch_nr      resample_result

# subset the task and fit the final model
task$select(instance$result_feature_set)
learner$train(task)
# }