├── .Rbuildignore ├── .editorconfig ├── .github ├── ISSUE_TEMPLATE.md ├── ISSUE_TEMPLATE │ ├── bug.md │ ├── miscellaneous.md │ └── question.md ├── PULL_REQUEST_TEMPLATE.md └── stale.yml ├── .gitignore ├── .ignore ├── .lintr ├── .pre-commit-config.yaml ├── DESCRIPTION ├── LICENSE ├── NAMESPACE ├── NEWS.md ├── R ├── Aggregation.R ├── BaggingWrapper.R ├── BaseEnsemble.R ├── BaseEnsemble_operators.R ├── BaseWrapper.R ├── BaseWrapper_operators.R ├── BenchmarkResultOrderLevels.R ├── BenchmarkResult_operators.R ├── ChainModel.R ├── ChainModel_operators.R ├── ClassifTask.R ├── ClassificationViaRegressionWrapper.R ├── ClusterTask.R ├── ConstantClassWrapper.R ├── CostSensClassifWrapper.R ├── CostSensRegrWrapper.R ├── CostSensTask.R ├── CostSensWeightedPairsWrapper.R ├── DownsampleWrapper.R ├── DummyFeaturesWrapper.R ├── FailureModel.R ├── FeatSelControl.R ├── FeatSelControlExhaustive.R ├── FeatSelControlGA.R ├── FeatSelControlRandom.R ├── FeatSelControlSequential.R ├── FeatSelResult.R ├── FeatSelWrapper.R ├── Filter.R ├── FilterEnsemble.R ├── FilterWrapper.R ├── HoldoutInstance_make_fixed.R ├── HomogeneousEnsemble.R ├── Impute.R ├── ImputeMethods.R ├── ImputeWrapper.R ├── Learner.R ├── Learner_operators.R ├── Learner_properties.R ├── Measure.R ├── Measure_colAUC.R ├── Measure_custom_resampled.R ├── Measure_make_cost.R ├── Measure_operators.R ├── Measure_properties.R ├── ModelMultiplexer.R ├── ModelMultiplexerParamSet.R ├── MulticlassWrapper.R ├── MultilabelBinaryRelevanceWrapper.R ├── MultilabelClassifierChainsWrapper.R ├── MultilabelDBRWrapper.R ├── MultilabelNestedStackingWrapper.R ├── MultilabelStackingWrapper.R ├── MultilabelTask.R ├── NoFeaturesModel.R ├── OptControl.R ├── OptResult.R ├── OptWrapper.R ├── OverBaggingWrapper.R ├── OverUnderSampling.R ├── OverUndersampleWrapper.R ├── Prediction.R ├── Prediction_operators.R ├── PreprocWrapper.R ├── PreprocWrapperCaret.R ├── RLearner.R ├── RLearner_classif_C50.R ├── RLearner_classif_FDboost.R ├── RLearner_classif_IBk.R ├── RLearner_classif_J48.R ├── RLearner_classif_JRip.R ├── RLearner_classif_LiblineaRL1L2SVC.R ├── RLearner_classif_LiblineaRL1LogReg.R ├── RLearner_classif_LiblineaRL2L1SVC.R ├── RLearner_classif_LiblineaRL2LogReg.R ├── RLearner_classif_LiblineaRL2SVC.R ├── RLearner_classif_LiblineaRMultiClassSVC.R ├── RLearner_classif_OneR.R ├── RLearner_classif_PART.R ├── RLearner_classif_RRF.R ├── RLearner_classif_ada.R ├── RLearner_classif_adaboostm1.R ├── RLearner_classif_binomial.R ├── RLearner_classif_boosting.R ├── RLearner_classif_bst.R ├── RLearner_classif_cforest.R ├── RLearner_classif_clusterSVM.R ├── RLearner_classif_ctree.R ├── RLearner_classif_cvglmnet.R ├── RLearner_classif_dbnDNN.R ├── RLearner_classif_dcSVM.R ├── RLearner_classif_earth.R ├── RLearner_classif_evtree.R ├── RLearner_classif_fdausc.glm.R ├── RLearner_classif_fdausc.kernel.R ├── RLearner_classif_fdausc.knn.R ├── RLearner_classif_fdausc.np.R ├── RLearner_classif_featureless.R ├── RLearner_classif_fgam.R ├── RLearner_classif_fnn.R ├── RLearner_classif_gamboost.R ├── RLearner_classif_gaterSVM.R ├── RLearner_classif_gausspr.R ├── RLearner_classif_gbm.R ├── RLearner_classif_glmboost.R ├── RLearner_classif_glmnet.R ├── RLearner_classif_h2odeeplearning.R ├── RLearner_classif_h2ogbm.R ├── RLearner_classif_h2oglm.R ├── RLearner_classif_h2orandomForest.R ├── RLearner_classif_kknn.R ├── RLearner_classif_knn.R ├── RLearner_classif_ksvm.R ├── RLearner_classif_lda.R ├── RLearner_classif_logreg.R ├── RLearner_classif_lssvm.R ├── RLearner_classif_lvq1.R ├── RLearner_classif_mda.R ├── 
RLearner_classif_mlp.R ├── RLearner_classif_multinom.R ├── RLearner_classif_naiveBayes.R ├── RLearner_classif_neuralnet.R ├── RLearner_classif_nnTrain.R ├── RLearner_classif_nnet.R ├── RLearner_classif_pamr.R ├── RLearner_classif_penalized.R ├── RLearner_classif_plr.R ├── RLearner_classif_plsdaCaret.R ├── RLearner_classif_probit.R ├── RLearner_classif_qda.R ├── RLearner_classif_rFerns.R ├── RLearner_classif_randomForest.R ├── RLearner_classif_ranger.R ├── RLearner_classif_rda.R ├── RLearner_classif_rotationForest.R ├── RLearner_classif_rpart.R ├── RLearner_classif_saeDNN.R ├── RLearner_classif_sda.R ├── RLearner_classif_sparseLDA.R ├── RLearner_classif_svm.R ├── RLearner_classif_xgboost.R ├── RLearner_cluster_Cobweb.R ├── RLearner_cluster_EM.R ├── RLearner_cluster_FarthestFirst.R ├── RLearner_cluster_MiniBatchKmeans.R ├── RLearner_cluster_SimpleKMeans.R ├── RLearner_cluster_XMeans.R ├── RLearner_cluster_cmeans.R ├── RLearner_cluster_dbscan.R ├── RLearner_cluster_kkmeans.R ├── RLearner_cluster_kmeans.R ├── RLearner_multilabel_cforest.R ├── RLearner_multilabel_rFerns.R ├── RLearner_regr_FDboost.R ├── RLearner_regr_GPfit.R ├── RLearner_regr_IBk.R ├── RLearner_regr_LiblineaRL2L1SVR.R ├── RLearner_regr_LiblineaRL2L2SVR.R ├── RLearner_regr_RRF.R ├── RLearner_regr_bcart.R ├── RLearner_regr_bgp.R ├── RLearner_regr_bgpllm.R ├── RLearner_regr_blm.R ├── RLearner_regr_brnn.R ├── RLearner_regr_bst.R ├── RLearner_regr_btgp.R ├── RLearner_regr_btgpllm.R ├── RLearner_regr_btlm.R ├── RLearner_regr_cforest.R ├── RLearner_regr_crs.R ├── RLearner_regr_ctree.R ├── RLearner_regr_cubist.R ├── RLearner_regr_cvglmnet.R ├── RLearner_regr_earth.R ├── RLearner_regr_evtree.R ├── RLearner_regr_featureless.R ├── RLearner_regr_fgam.R ├── RLearner_regr_fnn.R ├── RLearner_regr_frbs.R ├── RLearner_regr_gamboost.R ├── RLearner_regr_gausspr.R ├── RLearner_regr_gbm.R ├── RLearner_regr_glm.R ├── RLearner_regr_glmboost.R ├── RLearner_regr_glmnet.R ├── RLearner_regr_h2odeeplearning.R ├── RLearner_regr_h2ogbm.R ├── RLearner_regr_h2oglm.R ├── RLearner_regr_h2orandomForest.R ├── RLearner_regr_kknn.R ├── RLearner_regr_km.R ├── RLearner_regr_ksvm.R ├── RLearner_regr_laGP.R ├── RLearner_regr_lm.R ├── RLearner_regr_mars.R ├── RLearner_regr_mob.R ├── RLearner_regr_nnet.R ├── RLearner_regr_pcr.R ├── RLearner_regr_penalized.R ├── RLearner_regr_plsr.R ├── RLearner_regr_randomForest.R ├── RLearner_regr_ranger.R ├── RLearner_regr_rpart.R ├── RLearner_regr_rsm.R ├── RLearner_regr_rvm.R ├── RLearner_regr_svm.R ├── RLearner_regr_xgboost.R ├── RLearner_surv_cforest.R ├── RLearner_surv_coxph.R ├── RLearner_surv_cvglmnet.R ├── RLearner_surv_gamboost.R ├── RLearner_surv_gbm.R ├── RLearner_surv_glmboost.R ├── RLearner_surv_glmnet.R ├── RLearner_surv_ranger.R ├── RLearner_surv_rpart.R ├── RegrTask.R ├── RemoveConstantFeaturesWrapper.R ├── ResampleDesc.R ├── ResampleInstance.R ├── ResampleInstances.R ├── ResamplePrediction.R ├── ResampleResult.R ├── ResampleResult_operators.R ├── SMOTEWrapper.R ├── StackedLearner.R ├── SupervisedTask.R ├── SurvTask.R ├── Task.R ├── TaskDesc.R ├── Task_operators.R ├── TuneControl.R ├── TuneControlCMAES.R ├── TuneControlDesign.R ├── TuneControlGenSA.R ├── TuneControlGrid.R ├── TuneControlIrace.R ├── TuneControlMBO.R ├── TuneControlRandom.R ├── TuneMultiCritControl.R ├── TuneMultiCritControlGrid.R ├── TuneMultiCritControlMBO.R ├── TuneMultiCritControlNSGA2.R ├── TuneMultiCritControlRandom.R ├── TuneMultiCritResult.R ├── TuneResult.R ├── TuneWrapper.R ├── UnsupervisedTask.R ├── WeightedClassesWrapper.R ├── WrappedModel.R 
├── aggregations.R ├── analyzeFeatSelResult.R ├── asROCRPrediction.R ├── batchmark.R ├── benchmark.R ├── benchmark_helpers.R ├── cache_helpers.R ├── calculateConfusionMatrix.R ├── calculateROCMeasures.R ├── capLargeValues.R ├── checkAggrBeforeResample.R ├── checkBMRMeasure.R ├── checkLearner.R ├── checkLearnerBeforeTrain.R ├── checkMeasures.R ├── checkPrediction.R ├── checkTargetPreproc.R ├── checkTask.R ├── checkTaskSubset.R ├── checkTunerParset.R ├── configureMlr.R ├── convertBMRToRankMatrix.R ├── convertMLBenchObjToTask.R ├── convertX.R ├── createDummyFeatures.R ├── createSpatialResamplingPlots.R ├── crossover.R ├── datasets.R ├── downsample.R ├── dropFeatures.R ├── estimateResidualVariance.R ├── evalOptimizationState.R ├── extractFDAFeatures.R ├── extractFDAFeaturesMethods.R ├── extractFDAFeaturesWrapper.R ├── filterFeatures.R ├── fixDataForLearner.R ├── friedmanPostHocTestBMR.R ├── friedmanTestBMR.R ├── generateCalibration.R ├── generateFeatureImportance.R ├── generateFilterValues.R ├── generateHyperParsEffect.R ├── generateLearningCurve.R ├── generatePartialDependence.R ├── generateThreshVsPerf.R ├── getCaretParamSet.R ├── getClassWeightParam.R ├── getConfMatrix.R ├── getFeatSelResult.R ├── getFeatureImportance.R ├── getFunctionalFeatures.R ├── getHyperPars.R ├── getMultilabelBinaryPerformances.R ├── getNestedTuneResults.R ├── getOOBPreds.R ├── getParamSet.R ├── getResampleExtract.R ├── getResamplingIndices.R ├── getTaskConstructorForLearner.R ├── getTuneResult.R ├── getTuneThresholdExtra.R ├── hasFunctionalFeatures.R ├── helpLearner.R ├── helpers.R ├── helpers_FDGAMBoost.R ├── helpers_fda.R ├── joinClassLevels.R ├── learnerArgsToControl.R ├── learners.R ├── listLearners.R ├── listMeasures.R ├── logFunOpt.R ├── makeFunctionalData.R ├── makeLearner.R ├── makeLearners.R ├── measures.R ├── mergeBenchmarkResults.R ├── mergeSmallFactorLevels.R ├── mutateBits.R ├── normalizeFeatures.R ├── options.R ├── parallelization.R ├── performance.R ├── plotBMRBoxplots.R ├── plotBMRRanksAsBarChart.R ├── plotBMRSummary.R ├── plotCritDifferences.R ├── plotLearnerPrediction.R ├── plotResiduals.R ├── plotTuneMultiCritResult.R ├── predict.R ├── predictLearner.R ├── relativeOverfitting.R ├── removeConstantFeatures.R ├── removeHyperPars.R ├── resample.R ├── resample_convenience.R ├── selectFeatures.R ├── selectFeaturesExhaustive.R ├── selectFeaturesGA.R ├── selectFeaturesRandom.R ├── selectFeaturesSequential.R ├── setHyperPars.R ├── setId.R ├── setPredictThreshold.R ├── setPredictType.R ├── setThreshold.R ├── simplifyMeasureNames.R ├── smote.R ├── summarizeColumns.R ├── summarizeLevels.R ├── train.R ├── trainLearner.R ├── tuneCMAES.R ├── tuneDesign.R ├── tuneGenSA.R ├── tuneGrid.R ├── tuneIrace.R ├── tuneMBO.R ├── tuneMultiCritGrid.R ├── tuneMultiCritNSGA2.R ├── tuneMultiCritRandom.R ├── tuneParams.R ├── tuneParamsMultiCrit.R ├── tuneRandom.R ├── tuneThreshold.R ├── tunerFitnFun.R ├── utils.R ├── utils_imbalancy.R ├── utils_opt.R ├── utils_plot.R └── zzz.R ├── README.md ├── addon ├── convert_to_ascii_news.sh └── imgs │ ├── mlrLogo.png │ ├── mlrLogo_black_on_white320square.png │ └── mlrLogo_black_on_white32x32.png ├── cran-comments.md ├── data-raw ├── gunpoint.RData └── yeast.RData ├── data ├── agri.task.rda ├── bc.task.rda ├── bh.task.rda ├── costiris.task.rda ├── fuelsubset.task.rda ├── gunpoint.task.rda ├── iris.task.rda ├── lung.task.rda ├── mtcars.task.rda ├── phoneme.task.rda ├── pid.task.rda ├── sonar.task.rda ├── spam.task.rda ├── spatial.task.rda ├── wpbc.task.rda └── yeast.task.rda ├── inst ├── 
CITATION ├── examples │ └── MultilabelWrapper.R ├── makeData.R └── old-tutorials.zip ├── man-roxygen ├── arg_aggr.R ├── arg_aggregation_method.R ├── arg_bmr.R ├── arg_bmr_asdf.R ├── arg_bmr_drop.R ├── arg_bmr_learnerids.R ├── arg_bmr_taskids.R ├── arg_exclude.R ├── arg_facet_nrow_ncol.R ├── arg_features.R ├── arg_fsres.R ├── arg_imputey.R ├── arg_keep_extract.R ├── arg_keep_pred.R ├── arg_learner.R ├── arg_learner_classif.R ├── arg_learner_regr.R ├── arg_log_fun.R ├── arg_lrncl.R ├── arg_measure.R ├── arg_measures.R ├── arg_measures_opt.R ├── arg_models.R ├── arg_multilabel_cvfolds.R ├── arg_multilabel_order.R ├── arg_order_lrns.R ├── arg_order_tsks.R ├── arg_plotroc_obj.R ├── arg_pred.R ├── arg_predictthreshold.R ├── arg_prettynames.R ├── arg_showinfo.R ├── arg_subset.R ├── arg_target12.R ├── arg_task.R ├── arg_task_or_desc.R ├── arg_task_or_type.R ├── arg_taskdesc.R ├── arg_taskdf.R ├── arg_taskdf_target.R ├── arg_wrappedmod.R ├── ret_bmr_list_or_df.R ├── ret_gg2.R ├── ret_inv_null.R ├── ret_learner.R ├── ret_measure.R ├── ret_ps.R ├── ret_task.R ├── ret_taskdesc.R ├── ret_taskdf.R └── ret_wmodel.R ├── man ├── Aggregation.Rd ├── BenchmarkResult.Rd ├── ClassifTask.Rd ├── ClusterTask.Rd ├── ConfusionMatrix.Rd ├── CostSensTask.Rd ├── FailureModel.Rd ├── FeatSelControl.Rd ├── FeatSelResult.Rd ├── LearnerProperties.Rd ├── MeasureProperties.Rd ├── MultilabelTask.Rd ├── Prediction.Rd ├── RLearner.Rd ├── RegrTask.Rd ├── ResamplePrediction.Rd ├── ResampleResult.Rd ├── SurvTask.Rd ├── Task.Rd ├── TaskDesc.Rd ├── TuneControl.Rd ├── TuneMultiCritControl.Rd ├── TuneMultiCritResult.Rd ├── TuneResult.Rd ├── addRRMeasure.Rd ├── aggregations.Rd ├── agri.task.Rd ├── analyzeFeatSelResult.Rd ├── asROCRPrediction.Rd ├── batchmark.Rd ├── bc.task.Rd ├── benchmark.Rd ├── bh.task.Rd ├── cache_helpers.Rd ├── calculateConfusionMatrix.Rd ├── calculateROCMeasures.Rd ├── capLargeValues.Rd ├── changeData.Rd ├── checkLearner.Rd ├── checkPredictLearnerOutput.Rd ├── configureMlr.Rd ├── convertBMRToRankMatrix.Rd ├── convertMLBenchObjToTask.Rd ├── costiris.task.Rd ├── createDummyFeatures.Rd ├── createSpatialResamplingPlots.Rd ├── crossover.Rd ├── downsample.Rd ├── dropFeatures.Rd ├── estimateRelativeOverfitting.Rd ├── estimateResidualVariance.Rd ├── extractFDABsignal.Rd ├── extractFDADTWKernel.Rd ├── extractFDAFPCA.Rd ├── extractFDAFeatures.Rd ├── extractFDAFourier.Rd ├── extractFDAMultiResFeatures.Rd ├── extractFDATsfeatures.Rd ├── extractFDAWavelets.Rd ├── figures │ └── logo.png ├── filterFeatures.Rd ├── friedmanPostHocTestBMR.Rd ├── friedmanTestBMR.Rd ├── fuelsubset.task.Rd ├── generateCalibrationData.Rd ├── generateCritDifferencesData.Rd ├── generateFeatureImportanceData.Rd ├── generateFilterValuesData.Rd ├── generateHyperParsEffectData.Rd ├── generateLearningCurveData.Rd ├── generatePartialDependenceData.Rd ├── generateThreshVsPerfData.Rd ├── getBMRAggrPerformances.Rd ├── getBMRFeatSelResults.Rd ├── getBMRFilteredFeatures.Rd ├── getBMRLearnerIds.Rd ├── getBMRLearnerShortNames.Rd ├── getBMRLearners.Rd ├── getBMRMeasureIds.Rd ├── getBMRMeasures.Rd ├── getBMRModels.Rd ├── getBMRPerformances.Rd ├── getBMRPredictions.Rd ├── getBMRTaskDescriptions.Rd ├── getBMRTaskDescs.Rd ├── getBMRTaskIds.Rd ├── getBMRTuneResults.Rd ├── getCaretParamSet.Rd ├── getClassWeightParam.Rd ├── getConfMatrix.Rd ├── getDefaultMeasure.Rd ├── getFailureModelDump.Rd ├── getFailureModelMsg.Rd ├── getFeatSelResult.Rd ├── getFeatureImportance.Rd ├── getFeatureImportanceLearner.Rd ├── getFilteredFeatures.Rd ├── getFunctionalFeatures.Rd ├── 
getHomogeneousEnsembleModels.Rd ├── getHyperPars.Rd ├── getLearnerId.Rd ├── getLearnerModel.Rd ├── getLearnerNote.Rd ├── getLearnerPackages.Rd ├── getLearnerParVals.Rd ├── getLearnerParamSet.Rd ├── getLearnerPredictType.Rd ├── getLearnerShortName.Rd ├── getLearnerType.Rd ├── getMlrOptions.Rd ├── getMultilabelBinaryPerformances.Rd ├── getNestedTuneResultsOptPathDf.Rd ├── getNestedTuneResultsX.Rd ├── getOOBPreds.Rd ├── getOOBPredsLearner.Rd ├── getParamSet.Rd ├── getPredictionDump.Rd ├── getPredictionProbabilities.Rd ├── getPredictionResponse.Rd ├── getPredictionTaskDesc.Rd ├── getProbabilities.Rd ├── getRRDump.Rd ├── getRRPredictionList.Rd ├── getRRPredictions.Rd ├── getRRTaskDesc.Rd ├── getRRTaskDescription.Rd ├── getResamplingIndices.Rd ├── getStackedBaseLearnerPredictions.Rd ├── getTaskClassLevels.Rd ├── getTaskCosts.Rd ├── getTaskData.Rd ├── getTaskDesc.Rd ├── getTaskDescription.Rd ├── getTaskFeatureNames.Rd ├── getTaskFormula.Rd ├── getTaskId.Rd ├── getTaskNFeats.Rd ├── getTaskSize.Rd ├── getTaskTargetNames.Rd ├── getTaskTargets.Rd ├── getTaskType.Rd ├── getTuneResult.Rd ├── getTuneResultOptPath.Rd ├── gunpoint.task.Rd ├── hasFunctionalFeatures.Rd ├── hasProperties.Rd ├── helpLearner.Rd ├── helpLearnerParam.Rd ├── imputations.Rd ├── impute.Rd ├── iris.task.Rd ├── isFailureModel.Rd ├── joinClassLevels.Rd ├── learnerArgsToControl.Rd ├── learners.Rd ├── listFilterEnsembleMethods.Rd ├── listFilterMethods.Rd ├── listLearnerProperties.Rd ├── listLearners.Rd ├── listMeasureProperties.Rd ├── listMeasures.Rd ├── listTaskTypes.Rd ├── lung.task.Rd ├── makeAggregation.Rd ├── makeBaggingWrapper.Rd ├── makeBaseWrapper.Rd ├── makeChainModel.Rd ├── makeClassificationViaRegressionWrapper.Rd ├── makeConstantClassWrapper.Rd ├── makeCostMeasure.Rd ├── makeCostSensClassifWrapper.Rd ├── makeCostSensRegrWrapper.Rd ├── makeCostSensWeightedPairsWrapper.Rd ├── makeCustomResampledMeasure.Rd ├── makeDownsampleWrapper.Rd ├── makeDummyFeaturesWrapper.Rd ├── makeExtractFDAFeatMethod.Rd ├── makeExtractFDAFeatsWrapper.Rd ├── makeFeatSelWrapper.Rd ├── makeFilter.Rd ├── makeFilterEnsemble.Rd ├── makeFilterWrapper.Rd ├── makeFixedHoldoutInstance.Rd ├── makeFunctionalData.Rd ├── makeImputeMethod.Rd ├── makeImputeWrapper.Rd ├── makeLearner.Rd ├── makeLearners.Rd ├── makeMeasure.Rd ├── makeModelMultiplexer.Rd ├── makeModelMultiplexerParamSet.Rd ├── makeMulticlassWrapper.Rd ├── makeMultilabelBinaryRelevanceWrapper.Rd ├── makeMultilabelClassifierChainsWrapper.Rd ├── makeMultilabelDBRWrapper.Rd ├── makeMultilabelNestedStackingWrapper.Rd ├── makeMultilabelStackingWrapper.Rd ├── makeOverBaggingWrapper.Rd ├── makePreprocWrapper.Rd ├── makePreprocWrapperCaret.Rd ├── makeRLearner.classif.fdausc.glm.Rd ├── makeRLearner.classif.fdausc.kernel.Rd ├── makeRLearner.classif.fdausc.np.Rd ├── makeRemoveConstantFeaturesWrapper.Rd ├── makeResampleDesc.Rd ├── makeResampleInstance.Rd ├── makeSMOTEWrapper.Rd ├── makeStackedLearner.Rd ├── makeTaskDesc.Rd ├── makeTaskDescInternal.Rd ├── makeTuneControlCMAES.Rd ├── makeTuneControlDesign.Rd ├── makeTuneControlGenSA.Rd ├── makeTuneControlGrid.Rd ├── makeTuneControlIrace.Rd ├── makeTuneControlMBO.Rd ├── makeTuneControlRandom.Rd ├── makeTuneWrapper.Rd ├── makeUndersampleWrapper.Rd ├── makeWeightedClassesWrapper.Rd ├── makeWrappedModel.Rd ├── measures.Rd ├── mergeBenchmarkResults.Rd ├── mergeSmallFactorLevels.Rd ├── mlr-package.Rd ├── mlrFamilies.Rd ├── mtcars.task.Rd ├── normalizeFeatures.Rd ├── oversample.Rd ├── parallelization.Rd ├── performance.Rd ├── phoneme.task.Rd ├── pid.task.Rd ├── 
plotBMRBoxplots.Rd ├── plotBMRRanksAsBarChart.Rd ├── plotBMRSummary.Rd ├── plotCalibration.Rd ├── plotCritDifferences.Rd ├── plotFilterValues.Rd ├── plotHyperParsEffect.Rd ├── plotLearnerPrediction.Rd ├── plotLearningCurve.Rd ├── plotPartialDependence.Rd ├── plotROCCurves.Rd ├── plotResiduals.Rd ├── plotThreshVsPerf.Rd ├── plotTuneMultiCritResult.Rd ├── predict.WrappedModel.Rd ├── predictLearner.Rd ├── reduceBatchmarkResults.Rd ├── reextractFDAFeatures.Rd ├── reimpute.Rd ├── removeConstantFeatures.Rd ├── removeHyperPars.Rd ├── resample.Rd ├── selectFeatures.Rd ├── setAggregation.Rd ├── setHyperPars.Rd ├── setHyperPars2.Rd ├── setId.Rd ├── setLearnerId.Rd ├── setMeasurePars.Rd ├── setPredictThreshold.Rd ├── setPredictType.Rd ├── setThreshold.Rd ├── simplifyMeasureNames.Rd ├── smote.Rd ├── sonar.task.Rd ├── spam.task.Rd ├── spatial.task.Rd ├── subsetTask.Rd ├── summarizeColumns.Rd ├── summarizeLevels.Rd ├── train.Rd ├── trainLearner.Rd ├── tuneParams.Rd ├── tuneParamsMultiCrit.Rd ├── tuneThreshold.Rd ├── wpbc.task.Rd └── yeast.task.Rd ├── mlr.Rproj ├── pkgdown ├── _pkgdown.yml └── favicon │ ├── apple-touch-icon-120x120.png │ ├── apple-touch-icon-152x152.png │ ├── apple-touch-icon-180x180.png │ ├── apple-touch-icon-60x60.png │ ├── apple-touch-icon-76x76.png │ ├── apple-touch-icon.png │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ └── favicon.ico ├── src ├── Makevars.win ├── init.c ├── macros.h └── smote.c ├── tests ├── figs │ ├── deps.txt │ └── featsel │ │ ├── feat-type-cols.svg │ │ ├── filter-argument.svg │ │ ├── n-show-nfeat.svg │ │ └── n-show.svg ├── testthat.R └── testthat │ ├── _snaps │ ├── base_plotResiduals │ │ ├── plotresiduals-classif.svg │ │ ├── plotresiduals-hist-bmr-pretty.svg │ │ ├── plotresiduals-hist-bmr.svg │ │ ├── plotresiduals-hist.svg │ │ ├── plotresiduals-regr.svg │ │ └── plotresiduals-scatter-bmr.svg │ └── featsel_plotFilterValues │ │ ├── feat-type-cols.svg │ │ ├── filter-argument.svg │ │ ├── n-show-nfeat.svg │ │ └── n-show.svg │ ├── helper_funs.R │ ├── helper_helpers.R │ ├── helper_learners_all.R │ ├── helper_mock_learners.R │ ├── helper_objects.R │ ├── helper_zzz.R │ ├── test_base_BaggingWrapper.R │ ├── test_base_BaseEnsemble.R │ ├── test_base_BaseWrapper.R │ ├── test_base_ClassificationViaRegressionWrapper.R │ ├── test_base_ConstantClassWrapper.R │ ├── test_base_DummyFeaturesWrapper.R │ ├── test_base_FailureModel.R │ ├── test_base_Learner.R │ ├── test_base_Learner_properties.R │ ├── test_base_ModelMultiplexer.R │ ├── test_base_MulticlassWrapper.R │ ├── test_base_NoFeaturesModel.R │ ├── test_base_PreprocWrapper.R │ ├── test_base_PreprocWrapperCaret.R │ ├── test_base_SupervisedTask.R │ ├── test_base_TaskDesc.R │ ├── test_base_TuneWrapper.R │ ├── test_base_UnsupervisedTask.R │ ├── test_base_aggregations.R │ ├── test_base_batchmark.R │ ├── test_base_benchmark.R │ ├── test_base_blocking.R │ ├── test_base_caching.R │ ├── test_base_calculateConfusionMatrix.R │ ├── test_base_calculateROCMeasures.R │ ├── test_base_capLargeValues.R │ ├── test_base_chains.R │ ├── test_base_checkData.R │ ├── test_base_checkTaskLearner.R │ ├── test_base_checkTaskSubset.R │ ├── test_base_clustering.R │ ├── test_base_configureMlr.R │ ├── test_base_convertBMRToRankMatrix.R │ ├── test_base_convertMLBenchObjToTask.R │ ├── test_base_costs.R │ ├── test_base_costsens.R │ ├── test_base_createDummyFeatures.R │ ├── test_base_createSpatialResamplingPlots.R │ ├── test_base_debugdump.R │ ├── test_base_downsample.R │ ├── test_base_dropFeatures.R │ ├── test_base_estimateResidualVariance.R │ ├── test_base_fda.R 
│ ├── test_base_fda_extractFDAFeatures.R │ ├── test_base_fda_extractFDAFeaturesMethods.R │ ├── test_base_fda_extractFDAFeaturesWrapper.R │ ├── test_base_fixed_indices_cv.R │ ├── test_base_generateCalibration.R │ ├── test_base_generateFeatureImportanceData.R │ ├── test_base_generateHyperParsEffect.R │ ├── test_base_generateLearningCurve.R │ ├── test_base_generatePartialDependence.R │ ├── test_base_generateThreshVsPerf.R │ ├── test_base_getCaretParamSet.R │ ├── test_base_getFeatureImportance.R │ ├── test_base_getHyperPars.R │ ├── test_base_getOOBPreds.R │ ├── test_base_getParamSet.R │ ├── test_base_getTaskData.R │ ├── test_base_getTaskFormula.R │ ├── test_base_helpLearner.R │ ├── test_base_helpers.R │ ├── test_base_hyperpars.R │ ├── test_base_imbal_overbagging.R │ ├── test_base_imbal_overundersample.R │ ├── test_base_imbal_smote.R │ ├── test_base_imbal_weightedclasses.R │ ├── test_base_impute.R │ ├── test_base_joinClassLevels.R │ ├── test_base_learnerArgsToControl.R │ ├── test_base_listLearners.R │ ├── test_base_makeLearners.R │ ├── test_base_makeTask.R │ ├── test_base_measures.R │ ├── test_base_mergeBenchmarkResults.R │ ├── test_base_mergeSmallFactorLevels.R │ ├── test_base_multilabel.R │ ├── test_base_multilabelWrapperIds.R │ ├── test_base_normalizeFeatures.R │ ├── test_base_orderBMRLevels.R │ ├── test_base_orderedfactors.R │ ├── test_base_performance.R │ ├── test_base_plotBMRBoxplots.R │ ├── test_base_plotBMRRanksAsBarChart.R │ ├── test_base_plotBMRSummary.R │ ├── test_base_plotCritDifferences.R │ ├── test_base_plotLearnerPrediction.R │ ├── test_base_plotResiduals.R │ ├── test_base_predict.R │ ├── test_base_prediction_operators.R │ ├── test_base_relativeOverfitting.R │ ├── test_base_removeConstantFeatures.R │ ├── test_base_resample.R │ ├── test_base_resample_b632.R │ ├── test_base_resample_b632plus.R │ ├── test_base_resample_bs.R │ ├── test_base_resample_convenience.R │ ├── test_base_resample_cv.R │ ├── test_base_resample_fixedwindowcv.R │ ├── test_base_resample_getResamplingIndices.R │ ├── test_base_resample_growingwindowcv.R │ ├── test_base_resample_holdout.R │ ├── test_base_resample_loo.R │ ├── test_base_resample_makeResampleDesc.R │ ├── test_base_resample_operators.R │ ├── test_base_resample_repcv.R │ ├── test_base_resample_stratify.R │ ├── test_base_resample_subsample.R │ ├── test_base_resample_weights.R │ ├── test_base_selectFeatures.R │ ├── test_base_setPredictType.R │ ├── test_base_simplifyMeasureNames.R │ ├── test_base_spcv.R │ ├── test_base_summarizeColumns.R │ ├── test_base_summarizeLevels.R │ ├── test_base_train.R │ ├── test_base_tuneThreshold.R │ ├── test_base_tuning.R │ ├── test_base_weights.R │ ├── test_classif_C50.R │ ├── test_classif_FDboost.R │ ├── test_classif_IBk.R │ ├── test_classif_J48.R │ ├── test_classif_JRip.R │ ├── test_classif_LibLineaRMultiClassSVC.R │ ├── test_classif_LiblineaRL1L2SVC.R │ ├── test_classif_LiblineaRL1LogReg.R │ ├── test_classif_LiblineaRL2L1SVC.R │ ├── test_classif_LiblineaRL2LogReg.R │ ├── test_classif_LiblineaRL2SVC.R │ ├── test_classif_OneR.R │ ├── test_classif_PART.R │ ├── test_classif_RRF.R │ ├── test_classif_ada.R │ ├── test_classif_adaboostm1.R │ ├── test_classif_binomial.R │ ├── test_classif_boost.R │ ├── test_classif_bst.R │ ├── test_classif_cforest.R │ ├── test_classif_clusterSVM.R │ ├── test_classif_ctree.R │ ├── test_classif_cvglmnet.R │ ├── test_classif_dbnDNN.R │ ├── test_classif_dcSVM.R │ ├── test_classif_earth.R │ ├── test_classif_evtree.R │ ├── test_classif_fdausc.glm.R │ ├── test_classif_fdausc.kernel.R │ ├── 
test_classif_fdausc.knn.R │ ├── test_classif_fdausc.np.R │ ├── test_classif_featureless.R │ ├── test_classif_fgam.R │ ├── test_classif_fnn.R │ ├── test_classif_gamboost.R │ ├── test_classif_gaterSVM.R │ ├── test_classif_gausspr.R │ ├── test_classif_gbm.R │ ├── test_classif_glmboost.R │ ├── test_classif_glmnet.R │ ├── test_classif_h2odeeplearning.R │ ├── test_classif_h2ogbm.R │ ├── test_classif_h2oglm.R │ ├── test_classif_h2orandomForest.R │ ├── test_classif_kknn.R │ ├── test_classif_knn.R │ ├── test_classif_ksvm.R │ ├── test_classif_lda.R │ ├── test_classif_logreg.R │ ├── test_classif_lssvm.R │ ├── test_classif_mda.R │ ├── test_classif_mlp.R │ ├── test_classif_multinom.R │ ├── test_classif_naiveBayes.R │ ├── test_classif_neuralnet.R │ ├── test_classif_nnTrain.R │ ├── test_classif_nnet.R │ ├── test_classif_pamr.R │ ├── test_classif_penalized.R │ ├── test_classif_plr.R │ ├── test_classif_plsdaCaret.R │ ├── test_classif_probit.R │ ├── test_classif_qda.R │ ├── test_classif_rFerns.R │ ├── test_classif_randomForest.R │ ├── test_classif_ranger.R │ ├── test_classif_rda.R │ ├── test_classif_rotationForest.R │ ├── test_classif_rpart.R │ ├── test_classif_saeDNN.R │ ├── test_classif_sda.R │ ├── test_classif_sparseLDA.R │ ├── test_classif_svm.R │ ├── test_classif_xgboost.R │ ├── test_cluster_Cobweb.R │ ├── test_cluster_EM.R │ ├── test_cluster_FarthestFirst.R │ ├── test_cluster_MiniBatchKmeans.R │ ├── test_cluster_SimpleKMeans.R │ ├── test_cluster_XMeans.R │ ├── test_cluster_cmeans.R │ ├── test_cluster_dbscan.R │ ├── test_cluster_kkmeans.R │ ├── test_cluster_kmeans.R │ ├── test_featsel_FeatSelWrapper.R │ ├── test_featsel_FilterWrapper.R │ ├── test_featsel_analyzeFeatSelResult.R │ ├── test_featsel_filters.R │ ├── test_featsel_fselectorrcpp.R │ ├── test_featsel_generateFilterValuesData.R │ ├── test_featsel_plotFilterValues.R │ ├── test_featsel_praznik.R │ ├── test_featsel_rankSimpleFilters.R │ ├── test_featsel_selectFeatures.R │ ├── test_featsel_selectFeaturesSequential.R │ ├── test_learners_all_classif.R │ ├── test_learners_all_clusters.R │ ├── test_learners_all_general.R │ ├── test_learners_all_multilabel.R │ ├── test_learners_all_regr.R │ ├── test_learners_all_surv.R │ ├── test_learners_classiflabelswitch.R │ ├── test_lint.R │ ├── test_multilabel_cforest.R │ ├── test_parallel_mpi.R │ ├── test_parallel_socket_multicore.R │ ├── test_regr_FDboost.R │ ├── test_regr_GPfit.R │ ├── test_regr_IBk.R │ ├── test_regr_LiblineaRL2L1SVR.R │ ├── test_regr_LiblineaRL2L2SVR.R │ ├── test_regr_RRF.R │ ├── test_regr_bcart.R │ ├── test_regr_bgp.R │ ├── test_regr_bgpllm.R │ ├── test_regr_blm.R │ ├── test_regr_brnn.R │ ├── test_regr_bst.R │ ├── test_regr_btgp.R │ ├── test_regr_btgpllm.R │ ├── test_regr_btlm.R │ ├── test_regr_cforest.R │ ├── test_regr_crs.R │ ├── test_regr_ctree.R │ ├── test_regr_cubist.R │ ├── test_regr_cvglmnet.R │ ├── test_regr_earth.R │ ├── test_regr_evtree.R │ ├── test_regr_featureless.R │ ├── test_regr_fgam.R │ ├── test_regr_fnn.R │ ├── test_regr_frbs.R │ ├── test_regr_gamboost.R │ ├── test_regr_gausspr.R │ ├── test_regr_gbm.R │ ├── test_regr_glm.R │ ├── test_regr_glmboost.R │ ├── test_regr_glmnet.R │ ├── test_regr_h2odeeplearning.R │ ├── test_regr_h2ogbm.R │ ├── test_regr_h2oglm.R │ ├── test_regr_h2orandomForest.R │ ├── test_regr_kknn.R │ ├── test_regr_km.R │ ├── test_regr_ksvm.R │ ├── test_regr_laGP.R │ ├── test_regr_lm.R │ ├── test_regr_mob.R │ ├── test_regr_nnet.R │ ├── test_regr_penalized.R │ ├── test_regr_plsr.R │ ├── test_regr_randomForest.R │ ├── test_regr_ranger.R │ ├── test_regr_rpart.R │ ├── 
test_regr_rsm.R │ ├── test_regr_svm.R │ ├── test_regr_xgboost.R │ ├── test_stack.R │ ├── test_surv_cforest.R │ ├── test_surv_coxph.R │ ├── test_surv_cvglmnet.R │ ├── test_surv_gamboost.R │ ├── test_surv_gbm.R │ ├── test_surv_glmboost.R │ ├── test_surv_glmnet.R │ ├── test_surv_measures.R │ ├── test_surv_ranger.R │ ├── test_surv_rpart.R │ ├── test_tuneParams.R │ ├── test_tune_ModelMultiplexer.R │ ├── test_tune_getTuneResultOptPath.R │ ├── test_tune_tuneCMAES.R │ ├── test_tune_tuneDesign.R │ ├── test_tune_tuneGenSA.R │ ├── test_tune_tuneGrid.R │ ├── test_tune_tuneIrace.R │ ├── test_tune_tuneMBO.R │ ├── test_tune_tuneParamsMultiCrit.R │ ├── test_tune_tuneRandom.R │ └── test_tune_tuneThreshold.R ├── thirdparty ├── XMeans1.0.4.zip ├── gen_families.sh └── quicklint ├── tic.R ├── todo-files ├── BatchmarkToBMR.R ├── BatchmarkUseCaseRandomForest.R ├── PostprocWrapper.R ├── PreprocWrapperICA.R ├── PreprocWrapperPCA.R ├── PreprocWrapperRemoveOutliers.R ├── ProbthWrapper.R ├── RLearner_classif_bartMachine.R ├── RLearner_classif_bdk.R ├── RLearner_classif_blackboost.R ├── RLearner_classif_classiFunc.kernel.R ├── RLearner_classif_classiFunc.knn.R ├── RLearner_classif_extraTrees.R ├── RLearner_classif_fdashapelet.R ├── RLearner_classif_geoDA.R ├── RLearner_classif_grplasso.R ├── RLearner_classif_linDA.R ├── RLearner_classif_llr.R ├── RLearner_classif_mxff.R ├── RLearner_classif_nodeHarvest.R ├── RLearner_classif_obliqueRF.R ├── RLearner_classif_parallelForest.R ├── RLearner_classif_penalizedSVM.R ├── RLearner_classif_plsDA.R ├── RLearner_classif_quaDA.R ├── RLearner_classif_randomForestSRC.R ├── RLearner_classif_randomForestSRCSyn.R ├── RLearner_classif_randomUniformForest.R ├── RLearner_classif_rknn.R ├── RLearner_classif_rrlda.R ├── RLearner_classif_sparseMDA.R ├── RLearner_classif_wsrf.R ├── RLearner_classif_xyf.R ├── RLearner_multilabel_randomForestSRC.R ├── RLearner_regr_bagEarth.R ├── RLearner_regr_bartMachine.R ├── RLearner_regr_bdk.R ├── RLearner_regr_blackboost.R ├── RLearner_regr_extraTrees.R ├── RLearner_regr_fdafgam.R ├── RLearner_regr_icr.R ├── RLearner_regr_logicreg.R ├── RLearner_regr_mxff.R ├── RLearner_regr_nodeHarvest.R ├── RLearner_regr_randomForestSRC.R ├── RLearner_regr_randomForestSRCSyn.R ├── RLearner_regr_randomUniformForest.R ├── RLearner_regr_rknn.R ├── RLearner_regr_sg.libsvm.R ├── RLearner_regr_slim.R ├── RLearner_regr_xyf.R ├── RLearner_surv_CoxBoost.R ├── RLearner_surv_cv.CoxBoost.R ├── RLearner_surv_randomForestSRC.R ├── StackResults │ ├── boot_classifStack.R │ ├── boot_multiclassifStack.R │ ├── boot_multiclassifStack_cmc_rp.R │ ├── boot_regrStack.R │ ├── classifBaseLearner.pdf │ ├── classifStack_acc.pdf │ ├── classifStack_auc.pdf │ ├── multiclassifBaseLearner.pdf │ ├── multiclassifStack_acc.pdf │ ├── multiclassifStack_auc.pdf │ ├── regrBaseLearner.pdf │ └── regrStack.pdf ├── StepStackLearner.R ├── TuneControlMies.R ├── TuneControlOptim.R ├── analyzeFeatSelResult.R ├── batchmark.R ├── convertColumnNames.R ├── createInteractions.R ├── dps.R ├── dps_exps.R ├── fda_multires_features.c ├── feature_selection.Rmd ├── getROCCoords.R ├── makeTaskDescFromData.R ├── measures_ks.R ├── mlroverview.org ├── oml_mlr_measures.R ├── plotViperCharts.R ├── plotggVIS.R ├── repair_style.sh ├── setCosts.R ├── summarizeNA.R ├── test_base_friedmanTestBMR.R ├── test_base_plotViperCharts.R ├── test_base_resample_dps.R ├── test_base_rocmlr.R ├── test_classif_bartMachine.R ├── test_classif_bdk.R ├── test_classif_blackboost.R ├── test_classif_extraTrees.R ├── test_classif_fda_classiFunc.kernel.R ├── 
test_classif_fda_classiFunc.knn.R ├── test_classif_fdashapelets.R ├── test_classif_geoDA.R ├── test_classif_linDA.R ├── test_classif_mxff.R ├── test_classif_nodeHarvest.R ├── test_classif_obliqueRF.R ├── test_classif_parallelForest.R ├── test_classif_plsDA.R ├── test_classif_quaDA.R ├── test_classif_randomForestSRC.R ├── test_classif_randomForestSRCSyn.R ├── test_classif_randomUniformForest.R ├── test_classif_rknn.R ├── test_classif_rrlda.R ├── test_classif_sparseMDA.R ├── test_classif_wsrf.R ├── test_classif_xyf.R ├── test_cluster_dbscan.R ├── test_extra_chains.R ├── test_multilabel_randomForestSRC.R ├── test_preproc_extra_PreprocWrapperICA.R ├── test_preproc_extra_PreprocWrapperPCA.R ├── test_preproc_extra_PreprocWrapperRemoveOutliers.R ├── test_regr_bartMachine.R ├── test_regr_bdk.R ├── test_regr_blackboost.R ├── test_regr_extraTrees.R ├── test_regr_fdafgam.R ├── test_regr_logicreg.R ├── test_regr_mxff.R ├── test_regr_nodeHarvest.R ├── test_regr_randomForestSRC.R ├── test_regr_randomForestSRCSyn.R ├── test_regr_randomUniformForest.R ├── test_regr_rknn.R ├── test_regr_slim.R ├── test_regr_xyf.R ├── test_surv_CoxBoost.R ├── test_surv_cv.CoxBoost.R ├── test_surv_randomForestSRC.R ├── test_tuneMIES.R ├── test_tune_tuneOptim.R ├── tuneOptim.R └── tune_mies.R └── vignettes ├── img ├── benchmark_processing.png ├── learn_task.png ├── learner.png ├── mlrLogo.psd ├── mlrLogo_blue_141x64.png ├── mlrLogo_blue_566x256.png ├── mlrLogo_favicon.png ├── mlrLogo_white_141x64.png ├── mlrLogo_white_566x256.png ├── mlrLogo_white_88x40.png ├── mlr_logo-120x64.png ├── mlr_logo.png ├── nested_resampling.png ├── resampling.png ├── resampling_desc_and_instance.png ├── roc_example.png ├── spatial_cross_validation.png ├── theoretic_threshold.png ├── theoretic_weight_positive.png ├── train-basic_processing.png ├── tune-varsel_processing.png ├── variabel_selection_scheme.png └── weight_positive.png ├── mlr.Rmd └── tutorial ├── _mlr-tutorial_intro.Rmd ├── advanced_tune.Rmd ├── bagging.Rmd ├── benchmark_experiments.Rmd ├── classifier_calibration.Rmd ├── configureMlr.Rmd ├── cost_sensitive_classif.Rmd ├── create_filter.Rmd ├── create_imputation.Rmd ├── create_learner.Rmd ├── create_measure.Rmd ├── example_tasks.Rmd ├── feature_selection.Rmd ├── filter_methods.Rmd ├── functional_data.Rmd ├── functional_data.html ├── handling_of_spatial_data.Rmd ├── hyperpar_tuning_effects.Rmd ├── impute.Rmd ├── integrated_learners.Rmd ├── learner.Rmd ├── learning_curve.Rmd ├── measures.Rmd ├── mlr_publications.Rmd ├── multilabel.Rmd ├── nested_resampling.Rmd ├── out_of_bag_predictions.Rmd ├── over_and_undersampling.Rmd ├── parallelization.Rmd ├── partial_dependence.Rmd ├── performance.Rmd ├── predict.Rmd ├── preproc.Rmd ├── resample.Rmd ├── roc_analysis.Rmd ├── task.Rmd ├── train.Rmd ├── tune.Rmd ├── usecase_regression.Rmd ├── visualization.Rmd └── wrapper.Rmd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | todo-files 2 | ^test*.R$ 3 | .gitignore 4 | .ignore 5 | .travis.yml 6 | ^.*tar.gz$ 7 | Makefile 8 | README.md 9 | test_all.R 10 | test_src.R 11 | todo.txt 12 | src/*.o 13 | src/*.so 14 | testinfo.RData 15 | ^.*\.Rproj$ 16 | ^\.Rproj\.user$ 17 | ^man-roxygen 18 | ^.editorconfig$ 19 | ^travis_wait_*.log 20 | ^thirdparty$ 21 | ^.git$ 22 | ^makeR$ 23 | .github 24 | ^docs$ 25 | ^_pkgdown\.yml$ 26 | logo.png 27 | ^pkgdown$ 28 | tic.R 29 | tutorial/* 30 | ^vignettes/pdf/* 31 | appveyor.yml 32 | ^addon/* 33 | cran-comments.md 34 | revdep/* 35 | ^CRAN-RELEASE$ 36 | .pre-commit-config.yaml 
37 | ^\.circleci$ 38 | ^\.pre-commit-config\.yaml$ 39 | ^\.vscode$ 40 | ^src/tempdir/\.ccache$ 41 | ^\.ccache$ 42 | ^clang.*$ 43 | .lintr 44 | ^\.github$ 45 | ^clang-.* 46 | ^gfortran.* 47 | ^cran-comments\.md$ 48 | ^inst/debug\.R$ 49 | ^data-raw$ 50 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # See http://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | charset = utf8 6 | end_of_line = lf 7 | insert_final_newline = true 8 | indent_style = space 9 | trim_trailing_whitespace = true 10 | 11 | [*.{r,R}] 12 | indent_size = 2 13 | 14 | [*.{c,h}] 15 | indent_size = 4 16 | 17 | [*.{cpp,hpp}] 18 | indent_size = 4 19 | 20 | [{NEWS,DESCRIPTION,LICENSE}] 21 | max_line_length = 80 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/miscellaneous.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Miscellaneous 3 | about: Suggest an idea or raise a concern. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Prework 11 | 12 | - [ ] Search for duplicates among the [existing issues](https://github.com/mlr-org/mlr/issues), both open and closed. 13 | 14 | ## Description 15 | 16 | Describe the issue clearly and concisely. If applicable, write a minimal example in R code or pseudo-code to show input, usage, and desired output. 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Ask a question. 4 | title: '' 5 | labels: 'type-question' 6 | 7 | --- 8 | 9 | ## Prework 10 | 11 | - [ ] Search for duplicates among the [existing issues](https://github.com/mlr-org/mlr/issues), both open and closed. 12 | - [ ] Consider instead posting to [Stack Overflow](https://stackoverflow.com) under the [`mlr` tag](https://stackoverflow.com/tags/mlr). 13 | 14 | ## Question 15 | 16 | What would you like to know? 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | We are always happy to receive pull requests. 2 | 3 | Please make sure you have read our [coding guidelines](https://github.com/mlr-org/mlr3/wiki/style-guide). 
4 | 5 | This means in particular that you have understood: 6 | 7 | * The [style guide](https://github.com/mlr-org/mlr3/wiki/style-guide) - our lintr will provide you feedback on this 8 | * Circle CI will run the tests for you - no need to punish your machine with them (they take about 30 mins) 9 | * You can run `devtools::test()` locally in RStudio and tests will be skipped 10 | 11 | Please request a review from the following people on your PR: 12 | 13 | - pat-s 14 | - larskotthoff 15 | - mllg 16 | - berndbischl 17 | 18 | You might want to join our slack at: 19 | https://mlr-org.slack.com 20 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 300 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - prio-high 9 | - prio-blocking 10 | - type-bug 11 | # Label to use when marking an issue as stale 12 | staleLabel: stale 13 | # Comment to post when marking an issue as stale. Set to `false` to disable 14 | markComment: > 15 | This issue has been automatically marked as stale because it has not had 16 | recent activity. It will be closed if no further activity occurs. Thank you 17 | for your contributions. 18 | # Comment to post when closing a stale issue. Set to `false` to disable 19 | closeComment: false 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | mlr.Rcheck 2 | mlr_*.tar.gz 3 | .Rproj.user 4 | .Rhistory 5 | src/*.so 6 | src/*.o 7 | .RData 8 | src/*.dll 9 | tests/testthat/Rplots.pdf 10 | testinfo.RData 11 | inst/doc/ 12 | .directory 13 | .gdb_history 14 | .DS_Store 15 | ehthumbs.db 16 | Icon?
17 | Thumbs.db 18 | *xgboost.model 19 | .#* 20 | *~ 21 | \#*# 22 | .project 23 | .settings 24 | .ipynb_checkpoints 25 | revdep/ 26 | cran-comments.md 27 | .vscode/ 28 | docs/ 29 | CRAN-RELEASE 30 | inst/debug.R 31 | -------------------------------------------------------------------------------- /.ignore: -------------------------------------------------------------------------------- 1 | man/* 2 | todo-files/* 3 | docs/* 4 | -------------------------------------------------------------------------------- /.lintr: -------------------------------------------------------------------------------- 1 | linters: with_defaults( 2 | # lintr defaults: https://github.com/jimhester/lintr#available-linters 3 | # the following setup changes/removes certain linters 4 | assignment_linter = NULL, # do not force using <- for assignments 5 | object_name_linter = object_name_linter(c("snake_case", "CamelCase")), # only allow snake case and camel case object names 6 | cyclocomp_linter = NULL, # do not check function complexity 7 | commented_code_linter = NULL, # allow code in comments 8 | todo_comment_linter = NULL, 9 | line_length_linter = line_length_linter(120) 10 | ) 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2013-2018 2 | COPYRIGHT HOLDER: Bernd Bischl 3 | -------------------------------------------------------------------------------- /R/ChainModel.R: -------------------------------------------------------------------------------- 1 | #' Only exported for internal use. 2 | #' @param next.model ([WrappedModel])\cr 3 | #' The next model. 4 | #' @param cl ([character])\cr 5 | #' Subclass to assign to the resulting model. 6 | #' 7 | #' @keywords internal 8 | #' @export 9 | makeChainModel = function(next.model, cl) { 10 | setClasses(list(next.model = next.model), c(cl, "ChainModel", "WrappedModel")) 11 | } 12 | 13 | 14 | #' @export 15 | getLearnerModel.BaseWrapperModel = function(model, more.unwrap = FALSE) { 16 | # FIXME: this structure and special-cases really suck. FailureModel and NoFeaturesModel 17 | # should probably be redesigned at some point 18 | if (inherits(model$learner.model, "NoFeaturesModel")) { 19 | return(model$learner.model) 20 | } 21 | if (more.unwrap) { 22 | getLearnerModel(model$learner.model$next.model, more.unwrap = TRUE) 23 | } else { 24 | model$learner.model$next.model 25 | } 26 | } 27 | 28 | #' @export 29 | print.ChainModel = function(x, ...) 
{ 30 | print(x$next.model) 31 | } 32 | -------------------------------------------------------------------------------- /R/ChainModel_operators.R: -------------------------------------------------------------------------------- 1 | getLeafModel = function(model) { 2 | if (inherits(model, "BaseWrapperModel")) { 3 | return(getLeafModel(model$learner.model$next.model)) 4 | } 5 | return(model) 6 | } 7 | -------------------------------------------------------------------------------- /R/FeatSelControlExhaustive.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | #' @rdname FeatSelControl 3 | makeFeatSelControlExhaustive = function(same.resampling.instance = TRUE, 4 | maxit = NA_integer_, max.features = NA_integer_, tune.threshold = FALSE, tune.threshold.args = list(), 5 | log.fun = "default") { 6 | makeFeatSelControl(same.resampling.instance = same.resampling.instance, 7 | maxit = maxit, max.features = max.features, 8 | tune.threshold = tune.threshold, tune.threshold.args = tune.threshold.args, 9 | log.fun = log.fun, cl = "FeatSelControlExhaustive") 10 | } 11 | -------------------------------------------------------------------------------- /R/FeatSelControlRandom.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | #' @rdname FeatSelControl 3 | makeFeatSelControlRandom = function(same.resampling.instance = TRUE, 4 | maxit = 100L, max.features = NA_integer_, prob = 0.5, tune.threshold = FALSE, 5 | tune.threshold.args = list(), log.fun = "default") { 6 | maxit = asCount(maxit, positive = TRUE) 7 | makeFeatSelControl(same.resampling.instance = same.resampling.instance, 8 | maxit = maxit, max.features = max.features, prob = prob, tune.threshold = tune.threshold, 9 | tune.threshold.args = tune.threshold.args, log.fun = log.fun, cl = "FeatSelControlRandom") 10 | } 11 | -------------------------------------------------------------------------------- /R/FeatSelControlSequential.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | #' @rdname FeatSelControl 3 | makeFeatSelControlSequential = function(same.resampling.instance = TRUE, impute.val = NULL, method, 4 | alpha = 0.01, beta = -0.001, maxit = NA_integer_, max.features = NA_integer_, 5 | tune.threshold = FALSE, tune.threshold.args = list(), log.fun = "default") { 6 | makeFeatSelControl( 7 | same.resampling.instance = same.resampling.instance, 8 | impute.val = impute.val, 9 | maxit = maxit, 10 | max.features = max.features, 11 | method = method, 12 | alpha = alpha, 13 | beta = beta, 14 | tune.threshold = tune.threshold, 15 | tune.threshold.args = tune.threshold.args, 16 | log.fun = log.fun, 17 | cl = "FeatSelControlSequential" 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /R/HoldoutInstance_make_fixed.R: -------------------------------------------------------------------------------- 1 | #' Generate a fixed holdout instance for resampling. 2 | #' 3 | #' @param train.inds ([integer])\cr 4 | #' Indices for training set. 5 | #' @param test.inds ([integer])\cr 6 | #' Indices for test set. 7 | #' @param size (`integer(1)`)\cr 8 | #' Size of the data set to resample. 9 | #' The function needs to know the largest possible index of the whole data set. 10 | #' @return ([ResampleInstance]). 
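# A minimal usage sketch for makeFixedHoldoutInstance() (documented above,
# defined below), assuming mlr and rpart are installed; the index values and
# object names are illustrative, not part of the package.
library(mlr)
# Fix rows 1-100 of a 150-row task as training set, rows 101-150 as test set.
rin.fixed = makeFixedHoldoutInstance(train.inds = 1:100, test.inds = 101:150, size = 150)
# The instance can be used wherever a ResampleInstance is accepted, e.g.:
res = resample(makeLearner("classif.rpart"), iris.task, rin.fixed)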
11 | #' @export 12 | makeFixedHoldoutInstance = function(train.inds, test.inds, size) { 13 | train.inds = asInteger(train.inds, any.missing = FALSE) 14 | test.inds = asInteger(test.inds, any.missing = FALSE) 15 | size = asInt(size, lower = 1L) 16 | rdesc = makeResampleDesc("Holdout", split = length(train.inds) / size) 17 | rin = makeResampleInstance(rdesc, size = size) 18 | rin$train.inds[[1L]] = train.inds 19 | rin$test.inds[[1L]] = test.inds 20 | return(rin) 21 | } 22 | -------------------------------------------------------------------------------- /R/OptControl.R: -------------------------------------------------------------------------------- 1 | makeOptControl = function(same.resampling.instance, impute.val = NULL, tune.threshold = FALSE, 2 | tune.threshold.args = list(), log.fun = "default", final.dw.perc = NULL, ...) { 3 | assertFlag(same.resampling.instance) 4 | if (!is.null(impute.val)) { 5 | assertNumeric(impute.val) 6 | } 7 | assertFunction(log.fun, 8 | args = c("learner", "task", "resampling", "measures", "par.set", "control", "opt.path", "dob", "x", "y", "remove.nas", "stage", "prev.stage")) 9 | assertFlag(tune.threshold) 10 | makeS3Obj("OptControl", 11 | same.resampling.instance = same.resampling.instance, 12 | impute.val = impute.val, 13 | tune.threshold = tune.threshold, 14 | tune.threshold.args = tune.threshold.args, 15 | log.fun = log.fun, 16 | final.dw.perc = final.dw.perc, 17 | extra.args = list(...) 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /R/OptResult.R: -------------------------------------------------------------------------------- 1 | makeOptResult = function(learner, control, x, y, resampling, threshold, opt.path, cl, ...) { 2 | res = list( 3 | learner = learner, 4 | control = control, 5 | x = x, 6 | y = y, 7 | resampling = resampling, 8 | threshold = threshold, 9 | opt.path = opt.path 10 | ) 11 | res = c(res, list(...)) 12 | setClasses(res, c(cl, "OptResult")) 13 | } 14 | -------------------------------------------------------------------------------- /R/OptWrapper.R: -------------------------------------------------------------------------------- 1 | makeOptWrapper = function(id, learner, resampling, measures, par.set, bit.names, bits.to.features, 2 | control, show.info, learner.subclass, model.subclass) { 3 | 4 | x = makeBaseWrapper(id, learner$type, learner, learner.subclass = c(learner.subclass, "OptWrapper"), 5 | model.subclass = model.subclass) 6 | x$resampling = resampling 7 | x$measures = measures 8 | x$opt.pars = par.set 9 | x$bit.names = bit.names 10 | x$bits.to.features = bits.to.features 11 | x$opt.pars = par.set 12 | x$control = control 13 | x$show.info = show.info 14 | return(x) 15 | } 16 | 17 | #' @export 18 | print.OptModel = function(x, ...) 
{ 19 | print.WrappedModel(x) 20 | cat("\nOptimization result:\n") 21 | print(x$learner.model$opt.result) 22 | } 23 | -------------------------------------------------------------------------------- /R/RLearner_classif_naiveBayes.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | makeRLearner.classif.naiveBayes = function() { 3 | makeRLearnerClassif( 4 | cl = "classif.naiveBayes", 5 | package = "e1071", 6 | par.set = makeParamSet( 7 | makeNumericLearnerParam(id = "laplace", default = 0, lower = 0) 8 | # makeNumericLearnerParam(id = "threshold", default = 0.001, lower = 0) 9 | ), 10 | properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob"), 11 | name = "Naive Bayes", 12 | short.name = "nbayes", 13 | callees = "naiveBayes" 14 | ) 15 | } 16 | 17 | #' @export 18 | trainLearner.classif.naiveBayes = function(.learner, .task, .subset, .weights = NULL, ...) { 19 | f = getTaskFormula(.task) 20 | e1071::naiveBayes(f, data = getTaskData(.task, .subset), ...) 21 | } 22 | 23 | #' @export 24 | predictLearner.classif.naiveBayes = function(.learner, .model, .newdata, ...) { 25 | type = ifelse(.learner$predict.type == "response", "class", "raw") 26 | predict(.model$learner.model, newdata = .newdata, type = type, ...) 27 | } 28 | -------------------------------------------------------------------------------- /R/RLearner_multilabel_rFerns.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | makeRLearner.multilabel.rFerns = function() { 3 | makeRLearnerMultilabel( 4 | cl = "multilabel.rFerns", 5 | package = "rFerns", 6 | par.set = makeParamSet( 7 | makeIntegerLearnerParam(id = "depth", default = 5L), 8 | makeIntegerLearnerParam(id = "ferns", default = 1000L) 9 | ), 10 | properties = c("numerics", "factors", "ordered"), 11 | name = "Random ferns", 12 | short.name = "rFerns", 13 | callees = "rFerns" 14 | ) 15 | } 16 | 17 | #' @export 18 | trainLearner.multilabel.rFerns = function(.learner, .task, .subset, .weights = NULL, ...) { 19 | d = getTaskData(.task, .subset, target.extra = TRUE) 20 | rFerns::rFerns(x = d$data, y = as.matrix(d$target), ...) 21 | } 22 | 23 | #' @export 24 | predictLearner.multilabel.rFerns = function(.learner, .model, .newdata, ...) { 25 | as.matrix(predict(.model$learner.model, .newdata, ...)) 26 | } 27 | -------------------------------------------------------------------------------- /R/TuneMultiCritControlGrid.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | #' @param resolution ([integer])\cr 3 | #' Resolution of the grid for each numeric/integer parameter in `par.set`. 4 | #' For vector parameters, it is the resolution per dimension. 5 | #' Either pass one resolution for all parameters, or a named vector. 6 | #' See [ParamHelpers::generateGridDesign]. 7 | #' Default is 10. 
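# A short illustrative sketch, assuming mlr is attached: build a multi-criteria
# grid control that evaluates 5 grid points per numeric/integer parameter
# instead of the default 10 (the value 5 is an arbitrary example).
library(mlr)
ctrl.grid = makeTuneMultiCritControlGrid(resolution = 5L)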
8 | #' @rdname TuneMultiCritControl 9 | makeTuneMultiCritControlGrid = function(same.resampling.instance = TRUE, 10 | resolution = 10L, log.fun = "default", final.dw.perc = NULL, budget = NULL) { 11 | resolution = asCount(resolution, positive = TRUE) 12 | makeTuneMultiCritControl(same.resampling.instance = same.resampling.instance, 13 | resolution = resolution, log.fun = log.fun, final.dw.perc = final.dw.perc, 14 | budget = budget, cl = "TuneMultiCritControlGrid") 15 | } 16 | -------------------------------------------------------------------------------- /R/TuneMultiCritControlRandom.R: -------------------------------------------------------------------------------- 1 | #' @param maxit (`integer(1)`)\cr 2 | #' Number of iterations for random search. 3 | #' Default is 100. 4 | #' @export 5 | #' @rdname TuneMultiCritControl 6 | makeTuneMultiCritControlRandom = function(same.resampling.instance = TRUE, 7 | maxit = 100L, log.fun = "default", final.dw.perc = NULL, budget = NULL) { 8 | if (is.null(budget)) { 9 | budget = maxit 10 | } else if (is.null(maxit)) { 11 | maxit = budget 12 | } else if (budget != maxit) { 13 | stopf("The parameters budget (%i) and maxit (%i) differ.", budget, maxit) 14 | } 15 | maxit = asCount(maxit) 16 | budget = asCount(budget) 17 | 18 | makeTuneMultiCritControl(same.resampling.instance = same.resampling.instance, 19 | maxit = maxit, log.fun = log.fun, final.dw.perc = final.dw.perc, 20 | budget = budget, cl = "TuneMultiCritControlRandom") 21 | } 22 | -------------------------------------------------------------------------------- /R/cache_helpers.R: -------------------------------------------------------------------------------- 1 | #' @title Get or delete mlr cache directory 2 | #' 3 | #' @description Helper functions to deal with mlr caching. 4 | #' @name cache_helpers 5 | #' @rdname cache_helpers 6 | #' @details 7 | #' `getCacheDir()` returns the default mlr cache directory \cr 8 | #' `deleteCacheDir()` clears the default mlr cache directory. Custom cache 9 | #' directories must be deleted by hand. 10 | NULL 11 | 12 | #' @rdname cache_helpers 13 | #' @export 14 | getCacheDir = function() { 15 | rappdirs::user_cache_dir("mlr", "mlr-org") 16 | } 17 | 18 | #' @rdname cache_helpers 19 | #' @export 20 | deleteCacheDir = function() { 21 | unlink(rappdirs::user_cache_dir("mlr", "mlr-org"), recursive = TRUE) 22 | messagef("Successfully cleared directory '%s'.", rappdirs::user_cache_dir("mlr", "mlr-org")) 23 | } 24 | -------------------------------------------------------------------------------- /R/checkAggrBeforeResample.R: -------------------------------------------------------------------------------- 1 | # check whether rdesc$predict is set, so that the requiring properties of the measure are satisfied 2 | # called the beginning of resample 3 | checkAggrBeforeResample = function(measure, rdesc) { 4 | a = measure$aggr 5 | p = a$properties 6 | pred = rdesc$predict 7 | p.allowed = if (all(c("req.train", "req.test") %in% p)) { 8 | "both" 9 | } else if ("req.train" %in% p) { 10 | c("train", "both") 11 | } else if ("req.test" %in% p) { 12 | c("test", "both") 13 | } else { 14 | c("train", "test", "both") 15 | } 16 | if (pred %nin% p.allowed) { 17 | stopf("Aggregation '%s' not compatible with resampling! 
You have to set arg 'predict' to %s in your resample object, instead it is '%s'!", a$id, stri_paste("'", p.allowed, "'", collapse = " or "), pred) 18 | } 19 | } 20 | 21 | # map the checker over multiple measures 22 | checkAggrsBeforeResample = function(measures, rdesc) { 23 | lapply(measures, checkAggrBeforeResample, rdesc = rdesc) 24 | } 25 | -------------------------------------------------------------------------------- /R/checkBMRMeasure.R: -------------------------------------------------------------------------------- 1 | # small arg checker for a selected measure for a BMR 2 | # if NULL, the 1st measure in the BMR is returned 3 | checkBMRMeasure = function(measure, bmr) { 4 | if (is.null(measure)) { 5 | measure = getBMRMeasures(bmr)[[1]] 6 | } else { 7 | assertClass(measure, "Measure") 8 | assertChoice(measure$id, getBMRMeasureIds(bmr)) 9 | } 10 | return(measure) 11 | } 12 | -------------------------------------------------------------------------------- /R/checkMeasures.R: -------------------------------------------------------------------------------- 1 | checkMeasures = function(measures, obj, aggr = NULL) { 2 | if (missing(measures) || is.null(measures)) { 3 | measures = list(getDefaultMeasure(obj)) 4 | } else { 5 | measures = ensureVector(measures, n = 1L, cl = "Measure") 6 | assertList(measures, types = "Measure", min.len = 1L) 7 | } 8 | if (!is.null(aggr)) { 9 | measures = lapply(measures, setAggregation, aggr = aggr) 10 | } 11 | return(measures) 12 | } 13 | -------------------------------------------------------------------------------- /R/checkTask.R: -------------------------------------------------------------------------------- 1 | # performs arg checks of a task (or maybe also allow an taskdesc) 2 | # you can check that the task is from a list of certain types 3 | checkTask = function(x, cl = "Task", allow.desc = FALSE, task.type = NULL, binary = FALSE, .var.name = "task") { 4 | if (allow.desc) { 5 | assert(.var.name = .var.name, 6 | checkClass(x, classes = cl), 7 | checkClass(x, "TaskDesc") 8 | ) 9 | } else { 10 | assertClass(x, classes = cl, .var.name = .var.name) 11 | } 12 | td = getTaskDesc(x) 13 | 14 | if (!is.null(task.type) && td$type %nin% task.type) { 15 | stopf("Task must be one of '%s', but is: '%s'", collapse(task.type), td$type) 16 | } 17 | if (binary && length(td$class.levels) != 2L) { 18 | stopf("Task '%s' must be binary classification!", td$id) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /R/checkTaskSubset.R: -------------------------------------------------------------------------------- 1 | # @template arg_subset 2 | # @param size [int(1)]\cr size of the dataset to subset 3 | # @return numeric vector of subset indices 4 | checkTaskSubset = function(subset = NULL, size) { 5 | assertCount(size) 6 | if (is.null(subset)) { 7 | seq_len(size) 8 | } else if (is.logical(subset)) { 9 | subset = which(subset) 10 | assertInteger(subset, min.len = 1L, upper = size) 11 | } else { 12 | asInteger(subset, min.len = 1L, any.missing = FALSE, lower = 1L, upper = size) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /R/crossover.R: -------------------------------------------------------------------------------- 1 | #' Crossover. 2 | #' 3 | #' Takes two bit strings and creates a new one of the same size by selecting the items from the first string or 4 | #' the second, based on a given rate (the probability of choosing an element from the first string). 
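# A hedged sketch of the (unexported) crossover() helper defined below; the
# parent bit strings and the seed are invented for illustration.
set.seed(1)
parent.x = c(TRUE, TRUE, TRUE, FALSE, FALSE)
parent.y = c(FALSE, FALSE, TRUE, TRUE, TRUE)
# Each position is taken from parent.x with probability `rate`, else from parent.y.
mlr:::crossover(parent.x, parent.y, rate = 0.5)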
5 | #' 6 | #' @param x ([logical])\cr 7 | #' First parent string. 8 | #' @param y ([logical])\cr 9 | #' Second parent string. 10 | #' @param rate (`numeric(1)`)\cr 11 | #' A number representing the probability of selecting an element of the first string. 12 | #' Default is `0.5`. 13 | #' @return ([crossover]). 14 | #' @name crossover 15 | #' @rdname crossover 16 | #' @aliases crossover 17 | NULL 18 | 19 | crossover = function(x, y, rate = 0.5) { 20 | ratio = rbinom(length(x), 1, rate) 21 | ifelse(ratio == 1, x, y) 22 | } 23 | -------------------------------------------------------------------------------- /R/dropFeatures.R: -------------------------------------------------------------------------------- 1 | #' Drop some features of task. 2 | #' 3 | #' @template arg_task 4 | #' @param features ([character])\cr 5 | #' Features to drop. 6 | #' @template ret_task 7 | #' @export 8 | #' @family eda_and_preprocess 9 | dropFeatures = function(task, features) { 10 | assertClass(task, classes = "Task") 11 | f = getTaskFeatureNames(task) 12 | assertSubset(features, choices = f) 13 | subsetTask(task, features = setdiff(f, features)) 14 | } 15 | -------------------------------------------------------------------------------- /R/getFeatSelResult.R: -------------------------------------------------------------------------------- 1 | #' Returns the selected feature set and optimization path after training. 2 | #' 3 | #' @param object ([WrappedModel])\cr 4 | #' Trained Model created with [makeFeatSelWrapper]. 5 | #' @return ([FeatSelResult]). 6 | #' @export 7 | #' @family featsel 8 | getFeatSelResult = function(object) { 9 | assertClass(object, "FeatSelModel") 10 | object$learner.model$opt.result 11 | } 12 | -------------------------------------------------------------------------------- /R/getParamSet.R: -------------------------------------------------------------------------------- 1 | #' @title Get a description of all possible parameter settings for a learner. 2 | #' 3 | #' @description 4 | #' Returns the [ParamHelpers::ParamSet] from a [Learner]. 5 | #' 6 | #' @template ret_ps 7 | #' @family learner 8 | #' @name getParamSet 9 | #' @rdname getParamSet 10 | NULL 11 | 12 | #' @export 13 | getParamSet.Learner = function(x) { 14 | x$par.set 15 | } 16 | 17 | #' @export 18 | getParamSet.character = function(x) { 19 | x = checkLearner(x) 20 | getParamSet(x) 21 | } 22 | -------------------------------------------------------------------------------- /R/getTaskConstructorForLearner.R: -------------------------------------------------------------------------------- 1 | getTaskConstructorForLearner = function(learner) { 2 | while (inherits(learner, "BaseWrapper")) { 3 | learner = learner$next.learner 4 | } 5 | cl = class(learner) 6 | 7 | if ("RLearnerRegr" %in% cl) { 8 | constructor = makeRegrTask 9 | } else if ("RLearnerClassif" %in% cl) { 10 | constructor = makeClassifTask 11 | } else if ("RLearnerSurv" %in% cl) { 12 | constructor = makeSurvTask 13 | } else if ("RLearnerCluster" %in% cl) { 14 | constructor = makeClusterTask 15 | } else { 16 | stop("Unknown learner class for impute") 17 | } 18 | constructor 19 | } 20 | -------------------------------------------------------------------------------- /R/getTuneResult.R: -------------------------------------------------------------------------------- 1 | #' Returns the optimal hyperparameters and optimization path after training. 2 | #' 3 | #' @param object ([WrappedModel])\cr 4 | #' Trained Model created with [makeTuneWrapper]. 5 | #' @return ([TuneResult]). 
6 | #' @family tune 7 | #' @export 8 | getTuneResult = function(object) { 9 | assertClass(object, "TuneModel") 10 | object$learner.model$opt.result 11 | } 12 | -------------------------------------------------------------------------------- /R/getTuneThresholdExtra.R: -------------------------------------------------------------------------------- 1 | # in case we have tune.threshold activated we want this as an extra. otherwise NULL 2 | # @arg control 3 | # control [TuneControl] 4 | # res [result from evalOptimizationState] 5 | getTuneThresholdExtra = function(control, res) { 6 | if (control$tune.threshold) { 7 | # add class names to threshold, if longer than 1 8 | extra = as.list(res$threshold) 9 | setNames(extra, stri_paste("threshold", ifelse(length(extra) > 1L, ".", ""), names(extra), ignore_null = TRUE)) 10 | } else { 11 | NULL 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /R/hasFunctionalFeatures.R: -------------------------------------------------------------------------------- 1 | #' @title Check whether the object contains functional features. 2 | #' 3 | #' @description 4 | #' See title. 5 | #' 6 | #' @param obj (`Task` | `TaskDesc` | `data.frame`)\cr 7 | #' Object to check. 8 | #' @return (`logical(1)`) 9 | #' @export 10 | hasFunctionalFeatures = function(obj) { 11 | UseMethod("hasFunctionalFeatures") 12 | } 13 | 14 | #' @export 15 | hasFunctionalFeatures.data.frame = function(obj) { 16 | any(vlapply(obj, is.matrix)) 17 | } 18 | 19 | #' @export 20 | hasFunctionalFeatures.Task = function(obj) { 21 | obj$task.desc$n.feat["functionals"] > 0L 22 | } 23 | -------------------------------------------------------------------------------- /R/learners.R: -------------------------------------------------------------------------------- 1 | #' @title List of supported learning algorithms. 2 | #' 3 | #' @description 4 | #' All supported learners can be found by [listLearners] or as a table 5 | #' in the tutorial appendix: . 6 | #' 7 | #' @name learners 8 | #' @rdname learners 9 | NULL 10 | -------------------------------------------------------------------------------- /R/mutateBits.R: -------------------------------------------------------------------------------- 1 | # Takes a bit string and flips its elements based on a given mutation rate. 2 | # 3 | 4 | mutateBits = function(x, rate = 1 / length(x)) { 5 | n = length(x) 6 | flip = rbinom(n, 1, rate) 7 | (x + flip) %% 2 8 | } 9 | -------------------------------------------------------------------------------- /R/removeHyperPars.R: -------------------------------------------------------------------------------- 1 | #' @title Remove hyperparameters settings of a learner. 2 | #' 3 | #' @description 4 | #' Remove settings (previously set through mlr) for some parameters. 5 | #' Which means that the default behavior for that param will now be used. 6 | #' 7 | #' @template arg_learner 8 | #' @param ids ([character])\cr 9 | #' Parameter names to remove settings for. 10 | #' Default is `character(0L)`. 
11 | #' @template ret_learner 12 | #' @export 13 | #' @family learner 14 | removeHyperPars = function(learner, ids = character(0L)) { 15 | assertClass(learner, classes = "Learner") 16 | assertCharacter(ids, any.missing = FALSE) 17 | d = setdiff(ids, names(getHyperPars(learner))) 18 | if (length(d) > 0L) { 19 | stopf("Trying to remove param settings which were not set before: %s", collapse(d)) 20 | } 21 | UseMethod("removeHyperPars") 22 | } 23 | 24 | #' @export 25 | removeHyperPars.Learner = function(learner, ids = character(0L)) { 26 | learner$par.vals[ids] = NULL 27 | return(learner) 28 | } 29 | -------------------------------------------------------------------------------- /R/selectFeaturesExhaustive.R: -------------------------------------------------------------------------------- 1 | selectFeaturesExhaustive = function(learner, task, resampling, measures, bit.names, bits.to.features, control, opt.path, show.info) { 2 | p = length(bit.names) 3 | states = list(rep(0, p)) 4 | for (i in seq_len(min(control$max.features, p, na.rm = TRUE))) { 5 | x = combn(seq_len(p), i) 6 | s = lapply(seq_col(x), function(j) { 7 | b = rep(0, p) 8 | b[x[, j]] = 1 9 | b 10 | }) 11 | states = c(states, s) 12 | } 13 | evalOptimizationStatesFeatSel(learner, task, resampling, measures, bits.to.features, control, 14 | opt.path, show.info, states, 1L, as.integer(NA)) 15 | makeFeatSelResultFromOptPath(learner, measures, resampling, control, opt.path, task = task, bits.to.features = bits.to.features) 16 | } 17 | -------------------------------------------------------------------------------- /R/selectFeaturesRandom.R: -------------------------------------------------------------------------------- 1 | selectFeaturesRandom = function(learner, task, resampling, measures, bit.names, bits.to.features, 2 | control, opt.path, show.info) { 3 | states = lapply(seq_len(control$maxit), function(i) { 4 | createStates(n = length(bit.names), 5 | max.features = control$max.features, prob = control$extra.args$prob) 6 | }) 7 | evalOptimizationStatesFeatSel(learner, task, resampling, measures, bits.to.features, 8 | control, opt.path, show.info, states, 1L, NA_integer_) 9 | makeFeatSelResultFromOptPath(learner, measures, resampling, control, opt.path, task = task, bits.to.features = bits.to.features) 10 | } 11 | 12 | # help function in order to respect max.features 13 | 14 | createStates = function(n, max.features, prob) { 15 | if (is.na(max.features)) { 16 | return(rbinom(n, 1, prob)) 17 | } 18 | run.loop = TRUE 19 | while (run.loop) { 20 | x = rbinom(n, 1, prob) 21 | if (sum(x) <= max.features) { 22 | run.loop = FALSE 23 | } 24 | } 25 | return(x) 26 | } 27 | -------------------------------------------------------------------------------- /R/setId.R: -------------------------------------------------------------------------------- 1 | #' @title Set the id of a learner object. 2 | #' 3 | #' @description 4 | #' Deprecated, use [setLearnerId] instead. 5 | #' 6 | #' @template arg_learner 7 | #' @param id (`character(1)`)\cr 8 | #' New id for learner. 
9 | #' @template ret_learner 10 | #' @export 11 | #' @family learner 12 | setId = function(learner, id) { 13 | .Deprecated("setLearnerId") 14 | learner = checkLearner(learner) 15 | assertString(id) 16 | learner$id = id 17 | return(learner) 18 | } 19 | -------------------------------------------------------------------------------- /R/setPredictThreshold.R: -------------------------------------------------------------------------------- 1 | #' @title Set the probability threshold the learner should use. 2 | #' 3 | #' @description 4 | #' See `predict.threshold` in [makeLearner] and [setThreshold]. 5 | #' 6 | #' For complex wrappers only the top-level `predict.type` is currently set. 7 | #' 8 | #' @template arg_learner 9 | #' @template arg_predictthreshold 10 | #' @template ret_learner 11 | #' @family predict 12 | #' @family learner 13 | #' @export 14 | setPredictThreshold = function(learner, predict.threshold) { 15 | learner = checkLearner(learner, type = "classif") 16 | if (learner$predict.type != "prob") { 17 | stopf("predict.type = 'prob' must hold to set a predict.threshold!") 18 | } 19 | assertNumeric(predict.threshold, any.missing = FALSE) 20 | learner$predict.threshold = predict.threshold 21 | return(learner) 22 | } 23 | -------------------------------------------------------------------------------- /R/simplifyMeasureNames.R: -------------------------------------------------------------------------------- 1 | #' @title Simplify measure names. 2 | #' 3 | #' @description 4 | #' Clips aggregation names from character vector. 5 | #' E.g: 'mmce.test.mean' becomes 'mmce'. 6 | #' Elements that don't contain a measure name are ignored and returned unchanged. 7 | #' 8 | #' @param xs ([character])\cr 9 | #' Character vector that (possibly) contains aggregated measure names. 10 | #' @return ([character]). 11 | #' @export 12 | simplifyMeasureNames = function(xs) { 13 | 14 | assertCharacter(xs, any.missing = FALSE) 15 | # get all measure names 16 | all.measure.names = listMeasures() 17 | # cut everything after and including the first '.' 18 | xs.shortened = stri_replace_all_regex(xs, "\\..*", "") 19 | # check if this is a measure 20 | string.is.measure = (xs.shortened %in% all.measure.names) 21 | # if yes: insert shortened name, else insert original input 22 | res = ifelse(string.is.measure, xs.shortened, xs) 23 | as.character(res) 24 | } 25 | -------------------------------------------------------------------------------- /R/tuneDesign.R: -------------------------------------------------------------------------------- 1 | # tunes with a given data.frame conatining the design. 
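# Additional context, kept as comments so the file's behaviour is unchanged:
# each row of control$extra.args$design is treated as one hyperparameter
# configuration; dfRowsToList() from ParamHelpers turns the design rows into a
# list of parameter settings, which are then resampled one by one and collected
# into a TuneResult via makeTuneResultFromOptPath().
# Hypothetical usage sketch (parameter names and values are illustrative only,
# assuming the rpart parameters cp/minsplit; not run):
#   des = data.frame(cp = c(0.1, 0.01, 0.001), minsplit = c(10L, 20L, 30L))
#   ps = makeParamSet(
#     makeNumericParam("cp", lower = 0.001, upper = 0.1),
#     makeIntegerParam("minsplit", lower = 5L, upper = 50L))
#   ctrl = makeTuneControlDesign(design = des)
#   tuneParams("classif.rpart", iris.task, cv3, par.set = ps, control = ctrl)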
2 | tuneDesign = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 3 | xs = dfRowsToList(control$extra.args$design, par.set) 4 | evalOptimizationStatesTune(learner, task, resampling, measures, par.set, control, opt.path, 5 | show.info, xs, dobs = seq_along(xs), eols = NA_integer_, remove.nas = TRUE, 6 | resample.fun = resample.fun) 7 | makeTuneResultFromOptPath(learner, par.set, measures, resampling, control, opt.path) 8 | } 9 | -------------------------------------------------------------------------------- /R/tuneGrid.R: -------------------------------------------------------------------------------- 1 | # tunes with grid search, all params are supported as we use generateGridDesign 2 | tuneGrid = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 3 | des = generateGridDesign(par.set, resolution = control$extra.args$resolution, trafo = FALSE) 4 | if (!is.null(control$budget) && (nrow(des) != control$budget)) { 5 | stopf("The given budget (%i) does not fit to the size of the grid (%i).", control$budget, nrow(des)) 6 | } 7 | xs = dfRowsToList(des, par.set) 8 | evalOptimizationStatesTune(learner, task, resampling, measures, par.set, control, opt.path, 9 | show.info, xs, dobs = seq_along(xs), eols = NA_integer_, remove.nas = TRUE, resample.fun = resample.fun) 10 | makeTuneResultFromOptPath(learner, par.set, measures, resampling, control, opt.path) 11 | } 12 | -------------------------------------------------------------------------------- /R/tuneMultiCritGrid.R: -------------------------------------------------------------------------------- 1 | # tunes with grid search, all params are supported as we use generateGridDesign 2 | tuneMultiCritGrid = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 3 | des = generateGridDesign(par.set, resolution = control$extra.args$resolution, trafo = FALSE) 4 | if (!is.null(control$budget) && (nrow(des) != control$budget)) { 5 | stopf("The given budget (%i) does not fit to the size of the grid (%i).", control$budget, nrow(des)) 6 | } 7 | xs = dfRowsToList(des, par.set) 8 | evalOptimizationStatesTune(learner, task, resampling, measures, par.set, control, opt.path, 9 | show.info, xs, dobs = seq_along(xs), eols = NA_integer_, remove.nas = TRUE, resample.fun = resample.fun) 10 | makeTuneMultiCritResultFromOptPath(learner, par.set, measures, resampling, control, opt.path) 11 | } 12 | -------------------------------------------------------------------------------- /R/tuneMultiCritNSGA2.R: -------------------------------------------------------------------------------- 1 | tuneMultiCritNSGA2 = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 2 | 3 | requirePackages("mco", why = "tuneMultiCritNSGA2", default.method = "load") 4 | low = getLower(par.set) 5 | upp = getUpper(par.set) 6 | # FIXME: we need a vectorized version 7 | 8 | args = list(fn = tunerFitnFun, idim = length(low), odim = length(measures), 9 | lower.bounds = low, upper.bounds = upp, 10 | learner = learner, task = task, resampling = resampling, measures = measures, 11 | par.set = par.set, ctrl = control, opt.path = opt.path, show.info = show.info, 12 | convertx = convertXNumeric, remove.nas = FALSE, resample.fun = resample.fun) 13 | args = c(args, control$extra.args) 14 | 15 | do.call(mco::nsga2, args) 16 | 17 | makeTuneMultiCritResultFromOptPath(learner, par.set, measures, resampling, control, 
opt.path) 18 | } 19 | -------------------------------------------------------------------------------- /R/tuneMultiCritRandom.R: -------------------------------------------------------------------------------- 1 | tuneMultiCritRandom = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 2 | vals = sampleValues(n = control$extra.args$maxit, par = par.set, trafo = FALSE) 3 | evalOptimizationStatesTune(learner, task, resampling, measures, par.set, control, opt.path, 4 | show.info, vals, dobs = seq_along(vals), eols = NA_integer_, remove.nas = TRUE, 5 | resample.fun = resample.fun) 6 | makeTuneMultiCritResultFromOptPath(learner, par.set, measures, resampling, control, opt.path) 7 | } 8 | -------------------------------------------------------------------------------- /R/tuneRandom.R: -------------------------------------------------------------------------------- 1 | tuneRandom = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) { 2 | vals = sampleValues(n = control$extra.args$maxit, par = par.set, trafo = FALSE) 3 | evalOptimizationStatesTune(learner, task, resampling, measures, par.set, control, opt.path, 4 | show.info, vals, dobs = seq_along(vals), eols = NA_integer_, remove.nas = TRUE, resample.fun) 5 | makeTuneResultFromOptPath(learner, par.set, measures, resampling, control, opt.path) 6 | } 7 | -------------------------------------------------------------------------------- /R/utils_plot.R: -------------------------------------------------------------------------------- 1 | # checks for duplicated entries in learner.names and stops 2 | # with error message containing the learner name that appeared more 3 | # than once 4 | checkDuplicatedLearnerNames = function(learner.names) { 5 | dupl = duplicated(learner.names) 6 | if (any(dupl)) { 7 | dupl.learners = unique(learner.names[dupl]) 8 | stopf("Learner short names are not unique for: %s. 
\n Set 'pretty.names = FALSE' to resolve this.", 9 | collapse(dupl.learners)) 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /addon/convert_to_ascii_news.sh: -------------------------------------------------------------------------------- 1 | # Part 1: Replaces all level 2 headers and appends a ":" at the end of the line 2 | # Part 2: Indents all bullet points with a whitespace 3 | # Part 3: Removes all level 2 headers 4 | # Part 4: For all level 1 headers, add linebreak and 80 hyphens (not strictly required but clean) 5 | # Part 5: Remove all level 1 headers 6 | 7 | sed -e '/^##/ s/$/:/' -e 's/^*/ */' -e 's/^## *//' -e "/^#/a\\--------------------------------------------------------------------------------" -e 's/^# *//' < NEWS.md > NEWS 8 | -------------------------------------------------------------------------------- /addon/imgs/mlrLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/addon/imgs/mlrLogo.png -------------------------------------------------------------------------------- /addon/imgs/mlrLogo_black_on_white320square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/addon/imgs/mlrLogo_black_on_white320square.png -------------------------------------------------------------------------------- /addon/imgs/mlrLogo_black_on_white32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/addon/imgs/mlrLogo_black_on_white32x32.png -------------------------------------------------------------------------------- /data-raw/gunpoint.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data-raw/gunpoint.RData -------------------------------------------------------------------------------- /data-raw/yeast.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data-raw/yeast.RData -------------------------------------------------------------------------------- /data/agri.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/agri.task.rda -------------------------------------------------------------------------------- /data/bc.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/bc.task.rda -------------------------------------------------------------------------------- /data/bh.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/bh.task.rda -------------------------------------------------------------------------------- /data/costiris.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/costiris.task.rda 
-------------------------------------------------------------------------------- /data/fuelsubset.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/fuelsubset.task.rda -------------------------------------------------------------------------------- /data/gunpoint.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/gunpoint.task.rda -------------------------------------------------------------------------------- /data/iris.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/iris.task.rda -------------------------------------------------------------------------------- /data/lung.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/lung.task.rda -------------------------------------------------------------------------------- /data/mtcars.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/mtcars.task.rda -------------------------------------------------------------------------------- /data/phoneme.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/phoneme.task.rda -------------------------------------------------------------------------------- /data/pid.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/pid.task.rda -------------------------------------------------------------------------------- /data/sonar.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/sonar.task.rda -------------------------------------------------------------------------------- /data/spam.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/spam.task.rda -------------------------------------------------------------------------------- /data/spatial.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/spatial.task.rda -------------------------------------------------------------------------------- /data/wpbc.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/wpbc.task.rda -------------------------------------------------------------------------------- /data/yeast.task.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/data/yeast.task.rda 
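The .rda files listed above are the example tasks that ship with the package and are referenced throughout the documentation and examples. A minimal sketch of how one of these bundled tasks is typically used (assuming mlr is attached; the choice of learner and measure here is illustrative, not prescribed by the data files):

library(mlr)
# sonar.task is one of the bundled example tasks listed above
lrn = makeLearner("classif.rpart")
mod = train(lrn, sonar.task)
pred = predict(mod, task = sonar.task)
performance(pred, measures = mmce)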
-------------------------------------------------------------------------------- /inst/examples/MultilabelWrapper.R: -------------------------------------------------------------------------------- 1 | d = getTaskData(yeast.task) 2 | # drop some labels so example runs faster 3 | d = d[seq(1, nrow(d), by = 20), c(1:2, 15:17)] 4 | task = makeMultilabelTask(data = d, target = c("label1", "label2")) 5 | lrn = makeLearner("classif.rpart") 6 | lrn = makeMultilabelBinaryRelevanceWrapper(lrn) 7 | lrn = setPredictType(lrn, "prob") 8 | # train, predict and evaluate 9 | mod = train(lrn, task) 10 | pred = predict(mod, task) 11 | performance(pred, measure = list(multilabel.hamloss, multilabel.subset01, multilabel.f1)) 12 | # the next call basically has the same structure for any multilabel meta wrapper 13 | getMultilabelBinaryPerformances(pred, measures = list(mmce, auc)) 14 | # above works also with predictions from resample! 15 | -------------------------------------------------------------------------------- /inst/old-tutorials.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/inst/old-tutorials.zip -------------------------------------------------------------------------------- /man-roxygen/arg_aggr.R: -------------------------------------------------------------------------------- 1 | #' @param aggr ([Aggregation])\cr 2 | #' Aggregation function. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_aggregation_method.R: -------------------------------------------------------------------------------- 1 | #' @param aggregation (`character(1)`) \cr 2 | #' \dQuote{mean} or \dQuote{default}. See [getBMRAggrPerformances] 3 | #' for details on \dQuote{default}. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_bmr.R: -------------------------------------------------------------------------------- 1 | #' @param bmr ([BenchmarkResult])\cr 2 | #' Benchmark result. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_bmr_asdf.R: -------------------------------------------------------------------------------- 1 | #' @param as.df (`character(1)`)\cr 2 | #' Return one [data.frame] as result - or a list of lists of objects?. 3 | #' Default is `FALSE`. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_bmr_drop.R: -------------------------------------------------------------------------------- 1 | #' @param drop (`logical(1)`)\cr 2 | #' If drop is `FALSE` (the default), a nested list with 3 | #' the following structure is returned:\cr 4 | #' `res[task.ids][learner.ids]`.\cr 5 | #' If drop is set to `TRUE` it is checked if the list 6 | #' structure can be simplified.\cr 7 | #' If only one learner was passed, a list with entries 8 | #' for each task is returned.\cr 9 | #' If only one task was passed, the entries are named after 10 | #' the corresponding learner.\cr 11 | #' For an experiment with both one task and learner, 12 | #' the whole list structure is removed.\cr 13 | #' Note that the name of the 14 | #' task/learner will be dropped from the return object. 
15 | #' @md 16 | -------------------------------------------------------------------------------- /man-roxygen/arg_bmr_learnerids.R: -------------------------------------------------------------------------------- 1 | #' @param learner.ids (`character(1)`)\cr 2 | #' Restrict result to certain learners. 3 | #' Default is all. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_bmr_taskids.R: -------------------------------------------------------------------------------- 1 | #' @param task.ids (`character(1)`)\cr 2 | #' Restrict result to certain tasks. 3 | #' Default is all. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_exclude.R: -------------------------------------------------------------------------------- 1 | #' @param exclude ([character])\cr 2 | #' Names of the columns to exclude. 3 | #' The target does not have to be included here. 4 | #' Default is none. 5 | #' @md 6 | -------------------------------------------------------------------------------- /man-roxygen/arg_facet_nrow_ncol.R: -------------------------------------------------------------------------------- 1 | #' @param facet.wrap.nrow,facet.wrap.ncol ([integer])\cr 2 | #' Number of rows and columns for facetting. Default for both is `NULL`. 3 | #' In this case ggplot's `facet_wrap` will choose the layout itself. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_features.R: -------------------------------------------------------------------------------- 1 | #' @param features ([character] | [integer] | [logical])\cr 2 | #' Vector of selected inputs. You can either pass a character vector with the 3 | #' feature names, a vector of indices, or a logical vector.\cr 4 | #' In case of an index vector each element denotes the position of the feature 5 | #' name returned by [getTaskFeatureNames].\cr 6 | #' Note that the target feature is always included in the 7 | #' resulting task, you should not pass it here. 8 | #' Default is to use all features. 9 | #' @md 10 | -------------------------------------------------------------------------------- /man-roxygen/arg_fsres.R: -------------------------------------------------------------------------------- 1 | #' @param res ([FeatSelResult])\cr 2 | #' The result of of [selectFeatures]. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_imputey.R: -------------------------------------------------------------------------------- 1 | #' @param impute.val ([numeric])\cr 2 | #' If something goes wrong during optimization (e.g. the learner crashes), 3 | #' this value is fed back to the tuner, so the tuning algorithm does not abort. 4 | #' Imputation is only active if `on.learner.error` is configured not to stop in [configureMlr]. 5 | #' It is not stored in the optimization path, an NA and a corresponding error message are 6 | #' logged instead. 7 | #' Note that this value is later multiplied by -1 for maximization measures internally, so you 8 | #' need to enter a larger positive value for maximization here as well. 9 | #' Default is the worst obtainable value of the performance measure you optimize for when 10 | #' you aggregate by mean value, or `Inf` instead. 11 | #' For multi-criteria optimization pass a vector of imputation values, one for each of your measures, 12 | #' in the same order as your measures. 
13 | #' @md 14 | -------------------------------------------------------------------------------- /man-roxygen/arg_keep_extract.R: -------------------------------------------------------------------------------- 1 | #' @param keep.extract (`logical(1)`)\cr 2 | #' Keep the `extract` slot of the result object. When creating a lot of 3 | #' benchmark results with extensive tuning, the resulting R objects can become 4 | #' very large in size. That is why the tuning results stored in the `extract` 5 | #' slot are removed by default (`keep.extract = FALSE`). Note that when 6 | #' `keep.extract = FALSE` you will not be able to conduct analysis in the 7 | #' tuning results. 8 | #' @md 9 | -------------------------------------------------------------------------------- /man-roxygen/arg_keep_pred.R: -------------------------------------------------------------------------------- 1 | #' @param keep.pred (`logical(1)`)\cr 2 | #' Keep the prediction data in the `pred` slot of the result object. 3 | #' If you do many experiments (on larger data sets) these objects might unnecessarily increase 4 | #' object size / mem usage, if you do not really need them. 5 | #' The default is set to `TRUE`. 6 | #' @md 7 | -------------------------------------------------------------------------------- /man-roxygen/arg_learner.R: -------------------------------------------------------------------------------- 1 | #' @param learner ([Learner] | `character(1)`)\cr 2 | #' The learner. 3 | #' If you pass a string the learner will be created via [makeLearner]. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_learner_classif.R: -------------------------------------------------------------------------------- 1 | #' @param learner ([Learner] | `character(1)`)\cr 2 | #' The classification learner. 3 | #' If you pass a string the learner will be created via [makeLearner]. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_learner_regr.R: -------------------------------------------------------------------------------- 1 | #' @param learner ([Learner] | `character(1)`)\cr 2 | #' The regression learner. 3 | #' If you pass a string the learner will be created via [makeLearner]. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_log_fun.R: -------------------------------------------------------------------------------- 1 | #' @param log.fun (`function` | `character(1)`)\cr 2 | #' Function used for logging. If set to \dQuote{default} (the default), the evaluated design points, the resulting 3 | #' performances, and the runtime will be reported. 4 | #' If set to \dQuote{memory} the memory usage for each evaluation will also be displayed, with `character(1)` small increase 5 | #' in run time. 6 | #' Otherwise `character(1)` function with arguments `learner`, `resampling`, `measures`, 7 | #' `par.set`, `control`, `opt.path`, `dob`, `x`, `y`, `remove.nas`, 8 | #' `stage` and `prev.stage` is expected. 9 | #' The default displays the performance measures, the time needed for evaluating, 10 | #' the currently used memory and the max memory ever used before 11 | #' (the latter two both taken from [gc]). 12 | #' See the implementation for details. 
13 | #' @md 14 | -------------------------------------------------------------------------------- /man-roxygen/arg_lrncl.R: -------------------------------------------------------------------------------- 1 | #' @param cl (`character(1)`)\cr 2 | #' Class of learner. By convention, all classification learners 3 | #' start with \dQuote{classif.} all regression learners with 4 | #' \dQuote{regr.} all survival learners start with \dQuote{surv.} 5 | #' all clustering learners with \dQuote{cluster.} and all multilabel 6 | #' classification learners start with \dQuote{multilabel.}. 7 | #' A list of all integrated learners is available on the 8 | #' [learners] help page. 9 | #' @md 10 | -------------------------------------------------------------------------------- /man-roxygen/arg_measure.R: -------------------------------------------------------------------------------- 1 | #' @param measure ([Measure])\cr 2 | #' Performance measure. 3 | #' Default is the first measure used in the benchmark experiment. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_measures.R: -------------------------------------------------------------------------------- 1 | #' @param measures ([Measure] | list of [Measure])\cr 2 | #' Performance measure(s) to evaluate. 3 | #' Default is the default measure for the task, see here [getDefaultMeasure]. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_measures_opt.R: -------------------------------------------------------------------------------- 1 | #' @param measures (list of [Measure] | [Measure])\cr 2 | #' Performance measures to evaluate. The first measure, aggregated by the first aggregation function 3 | #' is optimized, others are simply evaluated. 4 | #' Default is the default measure for the task, see here [getDefaultMeasure]. 5 | #' @md 6 | -------------------------------------------------------------------------------- /man-roxygen/arg_models.R: -------------------------------------------------------------------------------- 1 | #' @param models (`logical(1)`)\cr 2 | #' Should all fitted models be stored in the [ResampleResult]? 3 | #' Default is `FALSE`. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_multilabel_cvfolds.R: -------------------------------------------------------------------------------- 1 | #' @param cv.folds (`integer(1)`)\cr 2 | #' The number of folds for the inner cross validation method to predict labels for the augmented feature space. Default is `2`. 3 | #' 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_multilabel_order.R: -------------------------------------------------------------------------------- 1 | #' @param order ([character])\cr 2 | #' Specifies the chain order using the names of the target labels. 3 | #' E.g. for `m` target labels, this must be a character vector of length `m` that contains a permutation of the target label names. 4 | #' Default is `NULL` which uses a random ordering of the target label names. 5 | #' @md 6 | -------------------------------------------------------------------------------- /man-roxygen/arg_order_lrns.R: -------------------------------------------------------------------------------- 1 | #' @param order.lrns (`character(n.learners)`)\cr 2 | #' Character vector with `learner.ids` in new order. 
3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_order_tsks.R: -------------------------------------------------------------------------------- 1 | #' @param order.tsks (`character(n.tasks)`)\cr 2 | #' Character vector with `task.ids` in new order. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_plotroc_obj.R: -------------------------------------------------------------------------------- 1 | #' @param obj (list of [Prediction] | list of [ResampleResult] | [BenchmarkResult])\cr 2 | #' Single prediction object, list of them, single resample result, list of them, or a benchmark result. 3 | #' In case of a list probably produced by different learners you want to compare, then 4 | #' name the list with the names you want to see in the plots, probably 5 | #' learner shortnames or ids. 6 | #' @md 7 | -------------------------------------------------------------------------------- /man-roxygen/arg_pred.R: -------------------------------------------------------------------------------- 1 | #' @param pred ([Prediction])\cr 2 | #' Prediction object. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_predictthreshold.R: -------------------------------------------------------------------------------- 1 | #' @param predict.threshold ([numeric])\cr 2 | #' Threshold to produce class labels. Has to be a named vector, where names correspond to class labels. 3 | #' Only for binary classification it can be a single numerical threshold for the positive class. 4 | #' See [setThreshold] for details on how it is applied. 5 | #' Default is `NULL` which means 0.5 / an equal threshold for each class. 6 | #' @md 7 | -------------------------------------------------------------------------------- /man-roxygen/arg_prettynames.R: -------------------------------------------------------------------------------- 1 | #' @param pretty.names (`logical(1)`)\cr 2 | #' Whether to use the short name of the learner instead of its ID in labels. Defaults to `TRUE`. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_showinfo.R: -------------------------------------------------------------------------------- 1 | #' @param show.info (`logical(1)`)\cr 2 | #' Print verbose output on console? 3 | #' Default is set via [configureMlr]. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_subset.R: -------------------------------------------------------------------------------- 1 | #' @param subset ([integer] | [logical] | `NULL`)\cr 2 | #' Selected cases. Either a logical or an index vector. 3 | #' By default `NULL` if all observations are used. 4 | #' @md 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_target12.R: -------------------------------------------------------------------------------- 1 | #' @param target (`character(1)` | `character(2)`)]\cr 2 | #' Name of the target variable(s). 3 | #' For survival analysis these are the names of the survival time and event columns, 4 | #' so it has length 2. 5 | #' @md 6 | -------------------------------------------------------------------------------- /man-roxygen/arg_task.R: -------------------------------------------------------------------------------- 1 | #' @param task ([Task])\cr 2 | #' The task. 
3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_task_or_desc.R: -------------------------------------------------------------------------------- 1 | #' @param x ([Task] | [TaskDesc])\cr 2 | #' Task or its description object. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_task_or_type.R: -------------------------------------------------------------------------------- 1 | #' @param obj (`character(1)` | [Task])\cr 2 | #' Either `character(1)` task or the type of the task, in the latter case one of: 3 | #' \dQuote{classif} \dQuote{regr} \dQuote{surv} \dQuote{costsens} \dQuote{cluster} \dQuote{multilabel}. 4 | #' Default is `NA` matching all types. 5 | -------------------------------------------------------------------------------- /man-roxygen/arg_taskdesc.R: -------------------------------------------------------------------------------- 1 | #' @param task.desc [TaskDesc]\cr 2 | #' Task description object. 3 | -------------------------------------------------------------------------------- /man-roxygen/arg_taskdf.R: -------------------------------------------------------------------------------- 1 | #' @param obj ([data.frame] | [Task])\cr 2 | #' Input data. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/arg_taskdf_target.R: -------------------------------------------------------------------------------- 1 | #' @param target (`character(1)` | `character(2)` | `character(n.classes)`)\cr 2 | #' Name(s) of the target variable(s). 3 | #' Only used when `obj` is a data.frame, otherwise ignored. 4 | #' If survival analysis is applicable, these are the names of the survival time and event columns, 5 | #' so it has length 2. 6 | #' For multilabel classification these are the names of logical columns that indicate whether 7 | #' a class label is present and the number of target variables corresponds to the number of 8 | #' classes. 9 | #' @md 10 | -------------------------------------------------------------------------------- /man-roxygen/arg_wrappedmod.R: -------------------------------------------------------------------------------- 1 | #' @param model ([WrappedModel])\cr 2 | #' The model. 3 | #' @md 4 | -------------------------------------------------------------------------------- /man-roxygen/ret_bmr_list_or_df.R: -------------------------------------------------------------------------------- 1 | #' @return ([list] | [data.frame]). See above. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_gg2.R: -------------------------------------------------------------------------------- 1 | #' @return ggplot2 plot object. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_inv_null.R: -------------------------------------------------------------------------------- 1 | #' @return (`invisible(NULL)`). 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_learner.R: -------------------------------------------------------------------------------- 1 | #' @return [Learner]. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_measure.R: -------------------------------------------------------------------------------- 1 | #' @return [Measure]. 
2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_ps.R: -------------------------------------------------------------------------------- 1 | #' @return [ParamSet]. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_task.R: -------------------------------------------------------------------------------- 1 | #' @return [Task]. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_taskdesc.R: -------------------------------------------------------------------------------- 1 | #' @return [TaskDesc]. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_taskdf.R: -------------------------------------------------------------------------------- 1 | #' @return [data.frame] | [Task]. Same type as `obj`. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man-roxygen/ret_wmodel.R: -------------------------------------------------------------------------------- 1 | #' @return [WrappedModel]. 2 | #' @md 3 | -------------------------------------------------------------------------------- /man/Aggregation.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Aggregation.R 3 | \name{Aggregation} 4 | \alias{Aggregation} 5 | \title{Aggregation object.} 6 | \description{ 7 | An aggregation method reduces the performance values of the test 8 | (and possibly the training sets) to a single value. 9 | To see all possible implemented aggregations look at \link{aggregations}. 10 | 11 | The aggregation can access all relevant information of the result after resampling 12 | and combine them into a single value. Though usually something very simple 13 | like taking the mean of the test set performances is done. 14 | 15 | Object members: 16 | \describe{ 17 | \item{id (\code{character(1)})}{Name of the aggregation method.} 18 | \item{name (\code{character(1)})}{Long name of the aggregation method.} 19 | \item{properties (\link{character})}{Properties of the aggregation.} 20 | \item{fun (`function(task, perf.test, perf.train, measure, group, pred)])}{Aggregation function.} 21 | } 22 | } 23 | \seealso{ 24 | \link{makeAggregation} 25 | } 26 | -------------------------------------------------------------------------------- /man/MeasureProperties.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Measure_properties.R 3 | \name{MeasureProperties} 4 | \alias{MeasureProperties} 5 | \alias{getMeasureProperties} 6 | \alias{hasMeasureProperties} 7 | \title{Query properties of measures.} 8 | \usage{ 9 | getMeasureProperties(measure) 10 | 11 | hasMeasureProperties(measure, props) 12 | } 13 | \arguments{ 14 | \item{measure}{(\link{Measure})\cr 15 | Performance measure. 16 | Default is the first measure used in the benchmark experiment.} 17 | 18 | \item{props}{(\link{character})\cr 19 | Vector of properties to query.} 20 | } 21 | \value{ 22 | \code{getMeasureProperties} returns a character vector with measure properties. 23 | \code{hasMeasureProperties} returns a logical vector of the same length as \code{props}. 
24 | } 25 | \description{ 26 | Properties can be accessed with \code{getMeasureProperties(measure)}, which returns a 27 | character vector. 28 | 29 | The measure properties are defined in \link{Measure}. 30 | } 31 | -------------------------------------------------------------------------------- /man/agri.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{agri.task} 5 | \alias{agri.task} 6 | \title{European Union Agricultural Workforces clustering task.} 7 | \description{ 8 | Contains the task (\code{agri.task}). 9 | } 10 | \references{ 11 | See \link[cluster:agriculture]{cluster::agriculture}. 12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/asROCRPrediction.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/asROCRPrediction.R 3 | \name{asROCRPrediction} 4 | \alias{asROCRPrediction} 5 | \title{Converts predictions to a format package ROCR can handle.} 6 | \usage{ 7 | asROCRPrediction(pred) 8 | } 9 | \arguments{ 10 | \item{pred}{(\link{Prediction})\cr 11 | Prediction object.} 12 | } 13 | \description{ 14 | Converts predictions to a format package ROCR can handle. 15 | } 16 | \seealso{ 17 | Other roc: 18 | \code{\link{calculateROCMeasures}()} 19 | 20 | Other predict: 21 | \code{\link{getPredictionProbabilities}()}, 22 | \code{\link{getPredictionResponse}()}, 23 | \code{\link{getPredictionTaskDesc}()}, 24 | \code{\link{predict.WrappedModel}()}, 25 | \code{\link{setPredictThreshold}()}, 26 | \code{\link{setPredictType}()} 27 | } 28 | \concept{predict} 29 | \concept{roc} 30 | -------------------------------------------------------------------------------- /man/bc.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{bc.task} 5 | \alias{bc.task} 6 | \title{Wisconsin Breast Cancer classification task.} 7 | \description{ 8 | Contains the task (\code{bc.task}). 9 | } 10 | \references{ 11 | See \link[mlbench:BreastCancer]{mlbench::BreastCancer}. 12 | The column \code{"Id"} and all incomplete cases have been removed from the task. 13 | } 14 | \keyword{data} 15 | -------------------------------------------------------------------------------- /man/bh.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{bh.task} 5 | \alias{bh.task} 6 | \title{Boston Housing regression task.} 7 | \description{ 8 | Contains the task (\code{bh.task}). 9 | } 10 | \references{ 11 | See \link[mlbench:BostonHousing]{mlbench::BostonHousing}. 
12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/cache_helpers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/cache_helpers.R 3 | \name{cache_helpers} 4 | \alias{cache_helpers} 5 | \alias{getCacheDir} 6 | \alias{deleteCacheDir} 7 | \title{Get or delete mlr cache directory} 8 | \usage{ 9 | getCacheDir() 10 | 11 | deleteCacheDir() 12 | } 13 | \description{ 14 | Helper functions to deal with mlr caching. 15 | } 16 | \details{ 17 | \code{getCacheDir()} returns the default mlr cache directory \cr 18 | \code{deleteCacheDir()} clears the default mlr cache directory. Custom cache 19 | directories must be deleted by hand. 20 | } 21 | -------------------------------------------------------------------------------- /man/changeData.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{changeData} 4 | \alias{changeData} 5 | \title{Change Task Data} 6 | \usage{ 7 | changeData(task, data, costs, weights, coordinates) 8 | } 9 | \arguments{ 10 | \item{task}{(\link{Task})\cr 11 | The task.} 12 | 13 | \item{data}{(\link{data.frame})\cr 14 | The new data to associate with the task. The names and types of the feature columns must match with the old data.} 15 | 16 | \item{costs}{([data.frame`\cr 17 | Optional: cost matrix.} 18 | 19 | \item{weights}{(\link{numeric})\cr 20 | Optional: weight vector.} 21 | } 22 | \description{ 23 | Mainly for internal use. Changes the data associated with a task, without modifying other task properties. 24 | } 25 | \keyword{internal} 26 | -------------------------------------------------------------------------------- /man/checkLearner.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/checkLearner.R 3 | \name{checkLearner} 4 | \alias{checkLearner} 5 | \title{Exported for internal use only.} 6 | \usage{ 7 | checkLearner(learner, type = NULL, props = NULL) 8 | } 9 | \arguments{ 10 | \item{learner}{(\link{Learner} | \code{character(1)})\cr 11 | The learner to check, or the name of the learner to create} 12 | 13 | \item{type}{(\code{character(1)})\cr 14 | What type of learner to require.} 15 | 16 | \item{props}{(\code{character(1)})\cr 17 | What properties to require.} 18 | } 19 | \description{ 20 | Exported for internal use only. 21 | } 22 | \keyword{internal} 23 | -------------------------------------------------------------------------------- /man/checkPredictLearnerOutput.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predictLearner.R 3 | \name{checkPredictLearnerOutput} 4 | \alias{checkPredictLearnerOutput} 5 | \title{Check output returned by predictLearner.} 6 | \usage{ 7 | checkPredictLearnerOutput(learner, model, p) 8 | } 9 | \arguments{ 10 | \item{learner}{(\link{Learner})\cr 11 | The learner.} 12 | 13 | \item{model}{(\link{WrappedModel})]\cr 14 | Model produced by training.} 15 | 16 | \item{p}{(any)\cr 17 | The prediction made by \code{learner}.} 18 | } 19 | \value{ 20 | (any). A sanitized version of \code{p}. 
21 | } 22 | \description{ 23 | Check the output coming from a Learner's internal 24 | \code{predictLearner} function. 25 | 26 | This function is for internal use. 27 | } 28 | \keyword{internal} 29 | -------------------------------------------------------------------------------- /man/convertMLBenchObjToTask.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/convertMLBenchObjToTask.R 3 | \name{convertMLBenchObjToTask} 4 | \alias{convertMLBenchObjToTask} 5 | \title{Convert a machine learning benchmark / demo object from package mlbench to a task.} 6 | \usage{ 7 | convertMLBenchObjToTask(x, n = 100L, ...) 8 | } 9 | \arguments{ 10 | \item{x}{(\code{character(1)})\cr 11 | Name of an mlbench function or dataset.} 12 | 13 | \item{n}{(\code{integer(1)})\cr 14 | Number of observations for data simul functions. 15 | Note that for a few mlbench function this setting is not exactly respected by mlbench. 16 | Default is 100.} 17 | 18 | \item{...}{(any)\cr 19 | Passed on to data simul functions.} 20 | } 21 | \description{ 22 | We auto-set the target column, drop any column which is called \dQuote{Id} and 23 | convert logicals to factors. 24 | } 25 | \examples{ 26 | print(convertMLBenchObjToTask("Ionosphere")) 27 | print(convertMLBenchObjToTask("mlbench.spirals", n = 100, sd = 0.1)) 28 | } 29 | -------------------------------------------------------------------------------- /man/costiris.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{costiris.task} 5 | \alias{costiris.task} 6 | \title{Iris cost-sensitive classification task.} 7 | \description{ 8 | Contains the task (\code{costiris.task}). 9 | } 10 | \references{ 11 | See \link[datasets:iris]{datasets::iris}. 12 | The cost matrix was generated artificially following 13 | 14 | Tu, H.-H. and Lin, H.-T. (2010), One-sided support vector regression for multiclass cost-sensitive classification. 15 | In ICML, J. Fürnkranz and T. Joachims, Eds., Omnipress, 1095--1102. 16 | } 17 | \keyword{data} 18 | -------------------------------------------------------------------------------- /man/crossover.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/crossover.R 3 | \name{crossover} 4 | \alias{crossover} 5 | \title{Crossover.} 6 | \arguments{ 7 | \item{x}{(\link{logical})\cr 8 | First parent string.} 9 | 10 | \item{y}{(\link{logical})\cr 11 | Second parent string.} 12 | 13 | \item{rate}{(\code{numeric(1)})\cr 14 | A number representing the probability of selecting an element of the first string. 15 | Default is \code{0.5}.} 16 | } 17 | \value{ 18 | (\link{crossover}). 19 | } 20 | \description{ 21 | Takes two bit strings and creates a new one of the same size by selecting the items from the first string or 22 | the second, based on a given rate (the probability of choosing an element from the first string). 
23 | } 24 | -------------------------------------------------------------------------------- /man/dropFeatures.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/dropFeatures.R 3 | \name{dropFeatures} 4 | \alias{dropFeatures} 5 | \title{Drop some features of task.} 6 | \usage{ 7 | dropFeatures(task, features) 8 | } 9 | \arguments{ 10 | \item{task}{(\link{Task})\cr 11 | The task.} 12 | 13 | \item{features}{(\link{character})\cr 14 | Features to drop.} 15 | } 16 | \value{ 17 | \link{Task}. 18 | } 19 | \description{ 20 | Drop some features of task. 21 | } 22 | \seealso{ 23 | Other eda_and_preprocess: 24 | \code{\link{capLargeValues}()}, 25 | \code{\link{createDummyFeatures}()}, 26 | \code{\link{mergeSmallFactorLevels}()}, 27 | \code{\link{normalizeFeatures}()}, 28 | \code{\link{removeConstantFeatures}()}, 29 | \code{\link{summarizeColumns}()}, 30 | \code{\link{summarizeLevels}()} 31 | } 32 | \concept{eda_and_preprocess} 33 | -------------------------------------------------------------------------------- /man/estimateResidualVariance.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/estimateResidualVariance.R 3 | \name{estimateResidualVariance} 4 | \alias{estimateResidualVariance} 5 | \title{Estimate the residual variance.} 6 | \usage{ 7 | estimateResidualVariance(x, task, data, target) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Learner} or \link{WrappedModel})\cr 11 | Learner or wrapped model.} 12 | 13 | \item{task}{(\link{RegrTask})\cr 14 | Regression task. 15 | If missing, \code{data} and \code{target} must be supplied.} 16 | 17 | \item{data}{(\link{data.frame})\cr 18 | A data frame containing the features and target variable. 19 | If missing, \code{task} must be supplied.} 20 | 21 | \item{target}{(\code{character(1)})\cr 22 | Name of the target variable. 23 | If missing, \code{task} must be supplied.} 24 | } 25 | \description{ 26 | Estimate the residual variance of a regression model on a given task. 27 | If a regression learner is provided instead of a model, the model is 28 | trained (see \link{train}) first. 29 | } 30 | -------------------------------------------------------------------------------- /man/figures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/man/figures/logo.png -------------------------------------------------------------------------------- /man/fuelsubset.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{fuelsubset.task} 5 | \alias{fuelsubset.task} 6 | \title{FuelSubset functional data regression task.} 7 | \description{ 8 | Contains the task (\code{fuelsubset.task}). 9 | 2 functional covariates and 1 scalar covariate. 10 | You have to predict the heat value of some fuel based on the 11 | ultraviolet radiation spectrum and infrared ray radiation and one scalar 12 | column called h2o. 13 | } 14 | \details{ 15 | The features and grids are scaled in the same way as in \link[FDboost:FDboost]{FDboost::FDboost}. 16 | } 17 | \references{ 18 | See Brockhaus, S., Scheipl, F., Hothorn, T., & Greven, S. (2015). The functional linear array model. 
Statistical Modelling, 15(3), 279–300. 19 | } 20 | \keyword{data} 21 | -------------------------------------------------------------------------------- /man/getBMRTaskDescriptions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/BenchmarkResult_operators.R 3 | \name{getBMRTaskDescriptions} 4 | \alias{getBMRTaskDescriptions} 5 | \title{Extract all task descriptions from benchmark result (DEPRECATED).} 6 | \usage{ 7 | getBMRTaskDescriptions(bmr) 8 | } 9 | \arguments{ 10 | \item{bmr}{(\link{BenchmarkResult})\cr 11 | Benchmark result.} 12 | } 13 | \value{ 14 | (\link{list}). 15 | } 16 | \description{ 17 | A list containing all \link{TaskDesc}s for each task contained in the benchmark experiment. 18 | } 19 | -------------------------------------------------------------------------------- /man/getDefaultMeasure.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Measure.R 3 | \name{getDefaultMeasure} 4 | \alias{getDefaultMeasure} 5 | \title{Get default measure.} 6 | \usage{ 7 | getDefaultMeasure(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\code{character(1)} | \link{Task} | \link{TaskDesc} | \link{Learner})\cr 11 | Task type, task, task description, learner name, a learner, or a type of learner (e.g. "classif").} 12 | } 13 | \value{ 14 | (\link{Measure}). 15 | } 16 | \description{ 17 | Get the default measure for a task type, task, task description or a learner. 18 | Currently these are: 19 | classif: mmce\cr 20 | regr: mse\cr 21 | cluster: db\cr 22 | surv: cindex\cr 23 | costsen: mcp\cr 24 | multilabel: multilabel.hamloss\cr 25 | } 26 | -------------------------------------------------------------------------------- /man/getFailureModelDump.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/WrappedModel.R 3 | \name{getFailureModelDump} 4 | \alias{getFailureModelDump} 5 | \title{Return the error dump of FailureModel.} 6 | \usage{ 7 | getFailureModelDump(model) 8 | } 9 | \arguments{ 10 | \item{model}{(\link{WrappedModel})\cr 11 | The model.} 12 | } 13 | \value{ 14 | (\code{last.dump}). 15 | } 16 | \description{ 17 | Returns the error dump that can be used with \code{debugger()} to evaluate errors. 18 | If \link{configureMlr} configuration \code{on.error.dump} is \code{FALSE}, this returns 19 | \code{NULL}. 20 | } 21 | -------------------------------------------------------------------------------- /man/getFailureModelMsg.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/WrappedModel.R 3 | \name{getFailureModelMsg} 4 | \alias{getFailureModelMsg} 5 | \title{Return error message of FailureModel.} 6 | \usage{ 7 | getFailureModelMsg(model) 8 | } 9 | \arguments{ 10 | \item{model}{(\link{WrappedModel})\cr 11 | The model.} 12 | } 13 | \value{ 14 | (\code{character(1)}). 15 | } 16 | \description{ 17 | Such a model is created when one sets the corresponding option in \link{configureMlr}. 18 | If no failure occurred, \code{NA} is returned. 19 | 20 | For complex wrappers this getter returns the first error message encountered in ANY model that failed.
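A short sketch of getDefaultMeasure as documented above; the expected results follow the default list in its description, and the learner and task objects are the ones bundled with mlr:

library(mlr)
getDefaultMeasure("classif")              # mmce
getDefaultMeasure(iris.task)              # mmce, derived from the task type
getDefaultMeasure(makeLearner("regr.lm")) # mse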
21 | } 22 | -------------------------------------------------------------------------------- /man/getFeatSelResult.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/getFeatSelResult.R 3 | \name{getFeatSelResult} 4 | \alias{getFeatSelResult} 5 | \title{Returns the selected feature set and optimization path after training.} 6 | \usage{ 7 | getFeatSelResult(object) 8 | } 9 | \arguments{ 10 | \item{object}{(\link{WrappedModel})\cr 11 | Trained Model created with \link{makeFeatSelWrapper}.} 12 | } 13 | \value{ 14 | (\link{FeatSelResult}). 15 | } 16 | \description{ 17 | Returns the selected feature set and optimization path after training. 18 | } 19 | \seealso{ 20 | Other featsel: 21 | \code{\link{FeatSelControl}}, 22 | \code{\link{analyzeFeatSelResult}()}, 23 | \code{\link{makeFeatSelWrapper}()}, 24 | \code{\link{selectFeatures}()} 25 | } 26 | \concept{featsel} 27 | -------------------------------------------------------------------------------- /man/getFilteredFeatures.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/FilterWrapper.R 3 | \name{getFilteredFeatures} 4 | \alias{getFilteredFeatures} 5 | \title{Returns the filtered features.} 6 | \usage{ 7 | getFilteredFeatures(model) 8 | } 9 | \arguments{ 10 | \item{model}{(\link{WrappedModel})\cr 11 | Trained Model created with \link{makeFilterWrapper}.} 12 | } 13 | \value{ 14 | (\link{character}). 15 | } 16 | \description{ 17 | Returns the filtered features. 18 | } 19 | \seealso{ 20 | Other filter: 21 | \code{\link{filterFeatures}()}, 22 | \code{\link{generateFilterValuesData}()}, 23 | \code{\link{listFilterEnsembleMethods}()}, 24 | \code{\link{listFilterMethods}()}, 25 | \code{\link{makeFilterEnsemble}()}, 26 | \code{\link{makeFilterWrapper}()}, 27 | \code{\link{makeFilter}()}, 28 | \code{\link{plotFilterValues}()} 29 | } 30 | \concept{filter} 31 | -------------------------------------------------------------------------------- /man/getHomogeneousEnsembleModels.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/HomogeneousEnsemble.R 3 | \name{getHomogeneousEnsembleModels} 4 | \alias{getHomogeneousEnsembleModels} 5 | \title{Deprecated, use \code{getLearnerModel} instead.} 6 | \usage{ 7 | getHomogeneousEnsembleModels(model, learner.models = FALSE) 8 | } 9 | \arguments{ 10 | \item{model}{Deprecated.} 11 | 12 | \item{learner.models}{Deprecated.} 13 | } 14 | \description{ 15 | Deprecated, use \code{getLearnerModel} instead. 16 | } 17 | -------------------------------------------------------------------------------- /man/getMlrOptions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/options.R 3 | \name{getMlrOptions} 4 | \alias{getMlrOptions} 5 | \title{Returns a list of mlr's options.} 6 | \usage{ 7 | getMlrOptions() 8 | } 9 | \value{ 10 | (\link{list}). 11 | } 12 | \description{ 13 | Gets the options for mlr. 
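A minimal sketch for getMlrOptions and configureMlr documented above: read the current options, change one, and restore them (the same round trip appears in the package's own configureMlr test later in this dump):

library(mlr)
opts = getMlrOptions()                   # named list of current mlr options
configureMlr(on.learner.error = "quiet") # change a single option
getMlrOptions()$on.learner.error         # "quiet"
do.call(configureMlr, opts)              # restore the previous settings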
14 | } 15 | \seealso{ 16 | Other configure: 17 | \code{\link{configureMlr}()} 18 | } 19 | \concept{configure} 20 | -------------------------------------------------------------------------------- /man/getOOBPredsLearner.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/getOOBPreds.R 3 | \name{getOOBPredsLearner} 4 | \alias{getOOBPredsLearner} 5 | \title{Provides out-of-bag predictions for a given model and the corresponding learner.} 6 | \usage{ 7 | getOOBPredsLearner(.learner, .model) 8 | } 9 | \arguments{ 10 | \item{.learner}{(\link{Learner})\cr 11 | The learner.} 12 | 13 | \item{.model}{(\link{WrappedModel})\cr 14 | Wrapped model.} 15 | } 16 | \value{ 17 | Same output structure as in (\link{predictLearner}). 18 | } 19 | \description{ 20 | This function is mostly for internal usage. To get out-of-bag predictions use \link{getOOBPreds}. 21 | } 22 | \keyword{internal} 23 | -------------------------------------------------------------------------------- /man/getPredictionDump.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Prediction_operators.R 3 | \name{getPredictionDump} 4 | \alias{getPredictionDump} 5 | \title{Return the error dump of a failed Prediction.} 6 | \usage{ 7 | getPredictionDump(pred) 8 | } 9 | \arguments{ 10 | \item{pred}{(\link{Prediction})\cr 11 | Prediction object.} 12 | } 13 | \value{ 14 | (\code{last.dump}). 15 | } 16 | \description{ 17 | Returns the error dump that can be used with \code{debugger()} to evaluate errors. 18 | If \link{configureMlr} configuration \code{on.error.dump} is \code{FALSE} or if the 19 | prediction did not fail, this returns \code{NULL}. 20 | } 21 | \seealso{ 22 | Other debug: 23 | \code{\link{FailureModel}}, 24 | \code{\link{ResampleResult}}, 25 | \code{\link{getRRDump}()} 26 | } 27 | \concept{debug} 28 | -------------------------------------------------------------------------------- /man/getPredictionTaskDesc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Prediction_operators.R 3 | \name{getPredictionTaskDesc} 4 | \alias{getPredictionTaskDesc} 5 | \title{Get summarizing task description from prediction.} 6 | \usage{ 7 | getPredictionTaskDesc(pred) 8 | } 9 | \arguments{ 10 | \item{pred}{(\link{Prediction})\cr 11 | Prediction object.} 12 | } 13 | \value{ 14 | ret_taskdesc 15 | } 16 | \description{ 17 | See title. 
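A minimal sketch for the Prediction getters documented above, using the bundled iris.task; note that getPredictionDump only returns a dump when on.error.dump is enabled and the prediction actually failed:

library(mlr)
mod = train(makeLearner("classif.rpart"), iris.task)
pred = predict(mod, iris.task)
getPredictionTaskDesc(pred)  # summarizing task description stored in the prediction
getPredictionDump(pred)      # NULL here: nothing failed and no dump was requested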
18 | } 19 | \seealso{ 20 | Other predict: 21 | \code{\link{asROCRPrediction}()}, 22 | \code{\link{getPredictionProbabilities}()}, 23 | \code{\link{getPredictionResponse}()}, 24 | \code{\link{predict.WrappedModel}()}, 25 | \code{\link{setPredictThreshold}()}, 26 | \code{\link{setPredictType}()} 27 | } 28 | \concept{predict} 29 | -------------------------------------------------------------------------------- /man/getProbabilities.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Prediction_operators.R 3 | \name{getProbabilities} 4 | \alias{getProbabilities} 5 | \title{Deprecated, use \code{getPredictionProbabilities} instead.} 6 | \usage{ 7 | getProbabilities(pred, cl) 8 | } 9 | \arguments{ 10 | \item{pred}{Deprecated.} 11 | 12 | \item{cl}{Deprecated.} 13 | } 14 | \description{ 15 | Deprecated, use \code{getPredictionProbabilities} instead. 16 | } 17 | -------------------------------------------------------------------------------- /man/getRRPredictions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ResampleResult_operators.R 3 | \name{getRRPredictions} 4 | \alias{getRRPredictions} 5 | \title{Get predictions from resample results.} 6 | \usage{ 7 | getRRPredictions(res) 8 | } 9 | \arguments{ 10 | \item{res}{(\link{ResampleResult})\cr 11 | The result of \link{resample} run with \code{keep.pred = TRUE}.} 12 | } 13 | \value{ 14 | (\link{ResamplePrediction}). 15 | } 16 | \description{ 17 | Very simple getter. 18 | } 19 | \seealso{ 20 | Other resample: 21 | \code{\link{ResamplePrediction}}, 22 | \code{\link{ResampleResult}}, 23 | \code{\link{addRRMeasure}()}, 24 | \code{\link{getRRPredictionList}()}, 25 | \code{\link{getRRTaskDescription}()}, 26 | \code{\link{getRRTaskDesc}()}, 27 | \code{\link{makeResampleDesc}()}, 28 | \code{\link{makeResampleInstance}()}, 29 | \code{\link{resample}()} 30 | } 31 | \concept{resample} 32 | -------------------------------------------------------------------------------- /man/getRRTaskDesc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ResampleResult_operators.R 3 | \name{getRRTaskDesc} 4 | \alias{getRRTaskDesc} 5 | \title{Get task description from resample results (DEPRECATED).} 6 | \usage{ 7 | getRRTaskDesc(res) 8 | } 9 | \arguments{ 10 | \item{res}{(\link{ResampleResult})\cr 11 | The result of \link{resample}.} 12 | } 13 | \value{ 14 | (\link{TaskDesc}). 15 | } 16 | \description{ 17 | Get a summarizing task description. 
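A short resampling sketch for the getters documented above; keep.pred = TRUE is required for getRRPredictions, as its argument description states:

library(mlr)
rdesc = makeResampleDesc("CV", iters = 2)
r = resample(makeLearner("classif.rpart"), iris.task, rdesc, keep.pred = TRUE)
getRRPredictions(r)  # the pooled ResamplePrediction
getRRTaskDesc(r)     # TaskDesc of the resampled task (deprecated getter)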
18 | } 19 | \seealso{ 20 | Other resample: 21 | \code{\link{ResamplePrediction}}, 22 | \code{\link{ResampleResult}}, 23 | \code{\link{addRRMeasure}()}, 24 | \code{\link{getRRPredictionList}()}, 25 | \code{\link{getRRPredictions}()}, 26 | \code{\link{getRRTaskDescription}()}, 27 | \code{\link{makeResampleDesc}()}, 28 | \code{\link{makeResampleInstance}()}, 29 | \code{\link{resample}()} 30 | } 31 | \concept{resample} 32 | -------------------------------------------------------------------------------- /man/getRRTaskDescription.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ResampleResult_operators.R 3 | \name{getRRTaskDescription} 4 | \alias{getRRTaskDescription} 5 | \title{Get task description from resample results (DEPRECATED).} 6 | \usage{ 7 | getRRTaskDescription(res) 8 | } 9 | \arguments{ 10 | \item{res}{(\link{ResampleResult})\cr 11 | The result of \link{resample}.} 12 | } 13 | \value{ 14 | (\link{TaskDesc}). 15 | } 16 | \description{ 17 | Get a summarizing task description. 18 | } 19 | \seealso{ 20 | Other resample: 21 | \code{\link{ResamplePrediction}}, 22 | \code{\link{ResampleResult}}, 23 | \code{\link{addRRMeasure}()}, 24 | \code{\link{getRRPredictionList}()}, 25 | \code{\link{getRRPredictions}()}, 26 | \code{\link{getRRTaskDesc}()}, 27 | \code{\link{makeResampleDesc}()}, 28 | \code{\link{makeResampleInstance}()}, 29 | \code{\link{resample}()} 30 | } 31 | \concept{resample} 32 | -------------------------------------------------------------------------------- /man/getStackedBaseLearnerPredictions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/StackedLearner.R 3 | \name{getStackedBaseLearnerPredictions} 4 | \alias{getStackedBaseLearnerPredictions} 5 | \title{Returns the predictions for each base learner.} 6 | \usage{ 7 | getStackedBaseLearnerPredictions(model, newdata = NULL) 8 | } 9 | \arguments{ 10 | \item{model}{(\link{WrappedModel})\cr Wrapped model, result of train.} 11 | 12 | \item{newdata}{(\link{data.frame})\cr 13 | New observations, for which the predictions using the specified base learners should be returned. 14 | Default is \code{NULL} and extracts the base learner predictions that were made during the training.} 15 | } 16 | \description{ 17 | Returns the predictions for each base learner. 18 | } 19 | \details{ 20 | None. 21 | } 22 | -------------------------------------------------------------------------------- /man/getTaskDesc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskDesc} 4 | \alias{getTaskDesc} 5 | \title{Get a summarizing task description.} 6 | \usage{ 7 | getTaskDesc(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | ret_taskdesc 15 | } 16 | \description{ 17 | See title. 
18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskFeatureNames}()}, 25 | \code{\link{getTaskFormula}()}, 26 | \code{\link{getTaskId}()}, 27 | \code{\link{getTaskNFeats}()}, 28 | \code{\link{getTaskSize}()}, 29 | \code{\link{getTaskTargetNames}()}, 30 | \code{\link{getTaskTargets}()}, 31 | \code{\link{getTaskType}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTaskDescription.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskDescription} 4 | \alias{getTaskDescription} 5 | \title{Deprecated, use \link{getTaskDesc} instead.} 6 | \usage{ 7 | getTaskDescription(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \description{ 14 | Deprecated, use \link{getTaskDesc} instead. 15 | } 16 | -------------------------------------------------------------------------------- /man/getTaskFeatureNames.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskFeatureNames} 4 | \alias{getTaskFeatureNames} 5 | \title{Get feature names of task.} 6 | \usage{ 7 | getTaskFeatureNames(task) 8 | } 9 | \arguments{ 10 | \item{task}{(\link{Task})\cr 11 | The task.} 12 | } 13 | \value{ 14 | (\link{character}). 15 | } 16 | \description{ 17 | Target column name is not included. 18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskDesc}()}, 25 | \code{\link{getTaskFormula}()}, 26 | \code{\link{getTaskId}()}, 27 | \code{\link{getTaskNFeats}()}, 28 | \code{\link{getTaskSize}()}, 29 | \code{\link{getTaskTargetNames}()}, 30 | \code{\link{getTaskTargets}()}, 31 | \code{\link{getTaskType}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTaskId.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskId} 4 | \alias{getTaskId} 5 | \title{Get the id of the task.} 6 | \usage{ 7 | getTaskId(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | (\code{character(1)}). 15 | } 16 | \description{ 17 | See title. 
18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskDesc}()}, 25 | \code{\link{getTaskFeatureNames}()}, 26 | \code{\link{getTaskFormula}()}, 27 | \code{\link{getTaskNFeats}()}, 28 | \code{\link{getTaskSize}()}, 29 | \code{\link{getTaskTargetNames}()}, 30 | \code{\link{getTaskTargets}()}, 31 | \code{\link{getTaskType}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTaskNFeats.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskNFeats} 4 | \alias{getTaskNFeats} 5 | \title{Get number of features in task.} 6 | \usage{ 7 | getTaskNFeats(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | (\code{integer(1)}). 15 | } 16 | \description{ 17 | See title. 18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskDesc}()}, 25 | \code{\link{getTaskFeatureNames}()}, 26 | \code{\link{getTaskFormula}()}, 27 | \code{\link{getTaskId}()}, 28 | \code{\link{getTaskSize}()}, 29 | \code{\link{getTaskTargetNames}()}, 30 | \code{\link{getTaskTargets}()}, 31 | \code{\link{getTaskType}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTaskSize.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskSize} 4 | \alias{getTaskSize} 5 | \title{Get number of observations in task.} 6 | \usage{ 7 | getTaskSize(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | (\code{integer(1)}). 15 | } 16 | \description{ 17 | See title. 18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskDesc}()}, 25 | \code{\link{getTaskFeatureNames}()}, 26 | \code{\link{getTaskFormula}()}, 27 | \code{\link{getTaskId}()}, 28 | \code{\link{getTaskNFeats}()}, 29 | \code{\link{getTaskTargetNames}()}, 30 | \code{\link{getTaskTargets}()}, 31 | \code{\link{getTaskType}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTaskTargetNames.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskTargetNames} 4 | \alias{getTaskTargetNames} 5 | \title{Get the name(s) of the target column(s).} 6 | \usage{ 7 | getTaskTargetNames(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | (\link{character}). 15 | } 16 | \description{ 17 | NB: For multilabel, \link{getTaskTargetNames} and \link{getTaskClassLevels} 18 | actually return the same thing. 
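The task getters documented in this stretch all accept either a Task or its TaskDesc; a quick sketch on the bundled iris.task (iris has 150 observations, four features, and target "Species"):

library(mlr)
getTaskId(iris.task)            # the task's id string
getTaskSize(iris.task)          # 150 observations
getTaskNFeats(iris.task)        # 4 features
getTaskFeatureNames(iris.task)  # the four feature column names, target excluded
getTaskTargetNames(iris.task)   # "Species"
# For a multilabel task such as yeast.task, getTaskTargetNames() and
# getTaskClassLevels() return the same character vector, as noted above.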
19 | } 20 | \seealso{ 21 | Other task: 22 | \code{\link{getTaskClassLevels}()}, 23 | \code{\link{getTaskCosts}()}, 24 | \code{\link{getTaskData}()}, 25 | \code{\link{getTaskDesc}()}, 26 | \code{\link{getTaskFeatureNames}()}, 27 | \code{\link{getTaskFormula}()}, 28 | \code{\link{getTaskId}()}, 29 | \code{\link{getTaskNFeats}()}, 30 | \code{\link{getTaskSize}()}, 31 | \code{\link{getTaskTargets}()}, 32 | \code{\link{getTaskType}()}, 33 | \code{\link{subsetTask}()} 34 | } 35 | \concept{task} 36 | -------------------------------------------------------------------------------- /man/getTaskType.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Task_operators.R 3 | \name{getTaskType} 4 | \alias{getTaskType} 5 | \title{Get the type of the task.} 6 | \usage{ 7 | getTaskType(x) 8 | } 9 | \arguments{ 10 | \item{x}{(\link{Task} | \link{TaskDesc})\cr 11 | Task or its description object.} 12 | } 13 | \value{ 14 | (\code{character(1)}). 15 | } 16 | \description{ 17 | See title. 18 | } 19 | \seealso{ 20 | Other task: 21 | \code{\link{getTaskClassLevels}()}, 22 | \code{\link{getTaskCosts}()}, 23 | \code{\link{getTaskData}()}, 24 | \code{\link{getTaskDesc}()}, 25 | \code{\link{getTaskFeatureNames}()}, 26 | \code{\link{getTaskFormula}()}, 27 | \code{\link{getTaskId}()}, 28 | \code{\link{getTaskNFeats}()}, 29 | \code{\link{getTaskSize}()}, 30 | \code{\link{getTaskTargetNames}()}, 31 | \code{\link{getTaskTargets}()}, 32 | \code{\link{subsetTask}()} 33 | } 34 | \concept{task} 35 | -------------------------------------------------------------------------------- /man/getTuneResultOptPath.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tuneParams.R 3 | \name{getTuneResultOptPath} 4 | \alias{getTuneResultOptPath} 5 | \title{Get the optimization path of a tuning result.} 6 | \usage{ 7 | getTuneResultOptPath(tune.result, as.df = TRUE) 8 | } 9 | \arguments{ 10 | \item{tune.result}{(\link{TuneResult}) \cr 11 | A tuning result of the (\link{tuneParams}) function.} 12 | 13 | \item{as.df}{(\code{logical(1)})\cr 14 | Should the optimization path be returned as a data frame? 15 | Default is \code{TRUE}.} 16 | } 17 | \value{ 18 | (\link[ParamHelpers:OptPath]{ParamHelpers::OptPath}) or (\link{data.frame}). 19 | } 20 | \description{ 21 | Returns the opt.path from a (\link{TuneResult}) object. 22 | } 23 | -------------------------------------------------------------------------------- /man/gunpoint.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{gunpoint.task} 5 | \alias{gunpoint.task} 6 | \title{Gunpoint functional data classification task.} 7 | \description{ 8 | Contains the task (\code{gunpoint.task}). 9 | You have to classify whether a person raises up a gun or just an empty hand. 10 | } 11 | \references{ 12 | See Ratanamahatana, C. A. & Keogh. E. (2004). Everything you know 13 | about Dynamic Time Warping is Wrong. Proceedings of SIAM International 14 | Conference on Data Mining (SDM05), 506-510. 
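A compact tuning sketch for getTuneResultOptPath documented above; the parameter set and grid control mirror those used in the package's own tuning tests later in this dump:

library(mlr)
ps = makeParamSet(makeDiscreteParam(id = "minsplit", values = c(5, 10)))
res = tuneParams(makeLearner("classif.rpart"), iris.task,
  resampling = makeResampleDesc("Holdout"), par.set = ps,
  control = makeTuneControlGrid())
getTuneResultOptPath(res)                 # data.frame (as.df = TRUE is the default)
getTuneResultOptPath(res, as.df = FALSE)  # the underlying ParamHelpers::OptPath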
15 | } 16 | \keyword{data} 17 | -------------------------------------------------------------------------------- /man/hasFunctionalFeatures.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hasFunctionalFeatures.R 3 | \name{hasFunctionalFeatures} 4 | \alias{hasFunctionalFeatures} 5 | \title{Check whether the object contains functional features.} 6 | \usage{ 7 | hasFunctionalFeatures(obj) 8 | } 9 | \arguments{ 10 | \item{obj}{(\code{Task} | \code{TaskDesc} | \code{data.frame})\cr 11 | Object to check.} 12 | } 13 | \value{ 14 | (\code{logical(1)}) 15 | } 16 | \description{ 17 | See title. 18 | } 19 | -------------------------------------------------------------------------------- /man/hasProperties.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Learner_properties.R 3 | \name{hasProperties} 4 | \alias{hasProperties} 5 | \title{Deprecated, use \code{hasLearnerProperties} instead.} 6 | \usage{ 7 | hasProperties(learner, props) 8 | } 9 | \arguments{ 10 | \item{learner}{Deprecated.} 11 | 12 | \item{props}{Deprecated.} 13 | } 14 | \description{ 15 | Deprecated, use \code{hasLearnerProperties} instead. 16 | } 17 | -------------------------------------------------------------------------------- /man/iris.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{iris.task} 5 | \alias{iris.task} 6 | \title{Iris classification task.} 7 | \description{ 8 | Contains the task (\code{iris.task}). 9 | } 10 | \references{ 11 | See \link[datasets:iris]{datasets::iris}. 12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/isFailureModel.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/WrappedModel.R 3 | \name{isFailureModel} 4 | \alias{isFailureModel} 5 | \title{Is the model a FailureModel?} 6 | \usage{ 7 | isFailureModel(model) 8 | } 9 | \arguments{ 10 | \item{model}{(\link{WrappedModel})\cr 11 | The model.} 12 | } 13 | \value{ 14 | (\code{logical(1)}). 15 | } 16 | \description{ 17 | Such a model is created when one sets the corresponding option in \link{configureMlr}. 18 | 19 | For complex wrappers this getter returns \code{TRUE} if ANY model contained in it failed. 20 | } 21 | -------------------------------------------------------------------------------- /man/joinClassLevels.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/joinClassLevels.R 3 | \name{joinClassLevels} 4 | \alias{joinClassLevels} 5 | \title{Join some class existing levels to new, larger class levels for classification problems.} 6 | \usage{ 7 | joinClassLevels(task, new.levels) 8 | } 9 | \arguments{ 10 | \item{task}{(\link{Task})\cr 11 | The task.} 12 | 13 | \item{new.levels}{(\code{list} of \code{character})\cr 14 | Element names specify the new class levels to create, while the corresponding element 15 | character vector specifies the existing class levels which will be joined to the new one.} 16 | } 17 | \value{ 18 | \link{Task}. 
19 | } 20 | \description{ 21 | Join some class existing levels to new, larger class levels for classification problems. 22 | } 23 | \examples{ 24 | joinClassLevels(iris.task, new.levels = list(foo = c("setosa", "virginica"))) 25 | } 26 | -------------------------------------------------------------------------------- /man/learnerArgsToControl.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/learnerArgsToControl.R 3 | \name{learnerArgsToControl} 4 | \alias{learnerArgsToControl} 5 | \title{Convert arguments to control structure.} 6 | \usage{ 7 | learnerArgsToControl(control, ...) 8 | } 9 | \arguments{ 10 | \item{control}{(\code{function})\cr 11 | Function that creates control structure.} 12 | 13 | \item{...}{(any)\cr 14 | Arguments for control structure function.} 15 | } 16 | \value{ 17 | Control structure for learner. 18 | } 19 | \description{ 20 | Find all elements in \code{...} which are not missing and 21 | call \code{control} on them. 22 | } 23 | -------------------------------------------------------------------------------- /man/learners.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/learners.R 3 | \name{learners} 4 | \alias{learners} 5 | \title{List of supported learning algorithms.} 6 | \description{ 7 | All supported learners can be found by \link{listLearners} or as a table 8 | in the tutorial appendix: \url{https://mlr.mlr-org.com/articles/tutorial/integrated_learners.html}. 9 | } 10 | -------------------------------------------------------------------------------- /man/listFilterEnsembleMethods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/FilterEnsemble.R 3 | \name{listFilterEnsembleMethods} 4 | \alias{listFilterEnsembleMethods} 5 | \title{List ensemble filter methods.} 6 | \usage{ 7 | listFilterEnsembleMethods(desc = TRUE) 8 | } 9 | \arguments{ 10 | \item{desc}{(\code{logical(1)})\cr 11 | Provide more detailed information about filters. 12 | Default is \code{TRUE}.} 13 | } 14 | \value{ 15 | (\link{data.frame}). 16 | } 17 | \description{ 18 | Returns a subset-able dataframe with filter information. 19 | } 20 | \seealso{ 21 | Other filter: 22 | \code{\link{filterFeatures}()}, 23 | \code{\link{generateFilterValuesData}()}, 24 | \code{\link{getFilteredFeatures}()}, 25 | \code{\link{listFilterMethods}()}, 26 | \code{\link{makeFilterEnsemble}()}, 27 | \code{\link{makeFilterWrapper}()}, 28 | \code{\link{makeFilter}()}, 29 | \code{\link{plotFilterValues}()} 30 | } 31 | \concept{filter} 32 | -------------------------------------------------------------------------------- /man/listLearnerProperties.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Learner_properties.R 3 | \name{listLearnerProperties} 4 | \alias{listLearnerProperties} 5 | \title{List the supported learner properties} 6 | \usage{ 7 | listLearnerProperties(type = "any") 8 | } 9 | \arguments{ 10 | \item{type}{(\code{character(1)})\cr 11 | Only return properties for a specified task type. Default is \dQuote{any}.} 12 | } 13 | \value{ 14 | (\link{character}). 
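The listing helpers documented around this point return plain character vectors or data.frames that can be inspected directly; for example:

library(mlr)
listFilterEnsembleMethods(desc = TRUE)  # data.frame describing the ensemble filters
listLearnerProperties("classif")        # properties available for classification learners
listLearnerProperties()                 # type = "any" is the default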
15 | } 16 | \description{ 17 | This is useful for determining which learner properties are available. 18 | } 19 | -------------------------------------------------------------------------------- /man/listMeasureProperties.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Measure_properties.R 3 | \name{listMeasureProperties} 4 | \alias{listMeasureProperties} 5 | \title{List the supported measure properties.} 6 | \usage{ 7 | listMeasureProperties() 8 | } 9 | \value{ 10 | (\link{character}). 11 | } 12 | \description{ 13 | This is useful for determining which measure properties are available. 14 | } 15 | -------------------------------------------------------------------------------- /man/listTaskTypes.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helpers.R 3 | \name{listTaskTypes} 4 | \alias{listTaskTypes} 5 | \title{List the supported task types in mlr} 6 | \usage{ 7 | listTaskTypes() 8 | } 9 | \value{ 10 | (\link{character}). 11 | } 12 | \description{ 13 | Returns a character vector with each of the supported task types in mlr. 14 | } 15 | -------------------------------------------------------------------------------- /man/lung.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{lung.task} 5 | \alias{lung.task} 6 | \title{NCCTG Lung Cancer survival task.} 7 | \description{ 8 | Contains the task (\code{lung.task}). 9 | } 10 | \references{ 11 | See \link[survival:lung]{survival::lung}. 12 | Incomplete cases have been removed from the task. 13 | } 14 | \keyword{data} 15 | -------------------------------------------------------------------------------- /man/makeChainModel.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ChainModel.R 3 | \name{makeChainModel} 4 | \alias{makeChainModel} 5 | \title{Only exported for internal use.} 6 | \usage{ 7 | makeChainModel(next.model, cl) 8 | } 9 | \arguments{ 10 | \item{next.model}{(\link{WrappedModel})\cr 11 | The next model.} 12 | 13 | \item{cl}{(\link{character})\cr 14 | Subclass to assign to the resulting model.} 15 | } 16 | \description{ 17 | Only exported for internal use. 18 | } 19 | \keyword{internal} 20 | -------------------------------------------------------------------------------- /man/makeFixedHoldoutInstance.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/HoldoutInstance_make_fixed.R 3 | \name{makeFixedHoldoutInstance} 4 | \alias{makeFixedHoldoutInstance} 5 | \title{Generate a fixed holdout instance for resampling.} 6 | \usage{ 7 | makeFixedHoldoutInstance(train.inds, test.inds, size) 8 | } 9 | \arguments{ 10 | \item{train.inds}{(\link{integer})\cr 11 | Indices for training set.} 12 | 13 | \item{test.inds}{(\link{integer})\cr 14 | Indices for test set.} 15 | 16 | \item{size}{(\code{integer(1)})\cr 17 | Size of the data set to resample. 18 | The function needs to know the largest possible index of the whole data set.} 19 | } 20 | \value{ 21 | (\link{ResampleInstance}). 
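A small sketch for makeFixedHoldoutInstance documented above; the fixed instance can be passed to resample in place of a resampling description:

library(mlr)
rin = makeFixedHoldoutInstance(train.inds = 1:100, test.inds = 101:150, size = 150)
resample(makeLearner("classif.rpart"), iris.task, rin)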
22 | } 23 | \description{ 24 | Generate a fixed holdout instance for resampling. 25 | } 26 | -------------------------------------------------------------------------------- /man/makeRLearner.classif.fdausc.glm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RLearner_classif_fdausc.glm.R 3 | \name{makeRLearner.classif.fdausc.glm} 4 | \alias{makeRLearner.classif.fdausc.glm} 5 | \title{Classification of functional data by Generalized Linear Models.} 6 | \usage{ 7 | \method{makeRLearner}{classif.fdausc.glm}() 8 | } 9 | \description{ 10 | Learner for classification using Generalized Linear Models. 11 | } 12 | -------------------------------------------------------------------------------- /man/makeRLearner.classif.fdausc.kernel.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RLearner_classif_fdausc.kernel.R 3 | \name{makeRLearner.classif.fdausc.kernel} 4 | \alias{makeRLearner.classif.fdausc.kernel} 5 | \title{Learner for kernel classification for functional data.} 6 | \usage{ 7 | \method{makeRLearner}{classif.fdausc.kernel}() 8 | } 9 | \description{ 10 | Learner for kernel Classification. 11 | } 12 | -------------------------------------------------------------------------------- /man/makeRLearner.classif.fdausc.np.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RLearner_classif_fdausc.np.R 3 | \name{makeRLearner.classif.fdausc.np} 4 | \alias{makeRLearner.classif.fdausc.np} 5 | \title{Learner for nonparametric classification for functional data.} 6 | \usage{ 7 | \method{makeRLearner}{classif.fdausc.np}() 8 | } 9 | \description{ 10 | Learner for Nonparametric Supervised Classification. 11 | } 12 | -------------------------------------------------------------------------------- /man/makeTaskDescInternal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/TaskDesc.R 3 | \name{makeTaskDescInternal} 4 | \alias{makeTaskDescInternal} 5 | \title{Exported for internal use.} 6 | \usage{ 7 | makeTaskDescInternal(type, id, data, target, weights, blocking, coordinates) 8 | } 9 | \arguments{ 10 | \item{type}{(\code{character(1)})\cr 11 | Task type.} 12 | 13 | \item{id}{(\code{character(1)})\cr 14 | task id} 15 | 16 | \item{data}{(\link{data.frame})\cr 17 | data} 18 | 19 | \item{target}{(\link{character})\cr 20 | target columns} 21 | 22 | \item{weights}{(\link{numeric})\cr 23 | weights} 24 | 25 | \item{blocking}{(\link{numeric})\cr 26 | task data blocking} 27 | 28 | \item{coordinates}{(\code{logical(1)})\cr 29 | whether spatial coordinates have been provided} 30 | } 31 | \description{ 32 | Exported for internal use. 33 | } 34 | \keyword{internal} 35 | -------------------------------------------------------------------------------- /man/mtcars.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{mtcars.task} 5 | \alias{mtcars.task} 6 | \title{Motor Trend Car Road Tests clustering task.} 7 | \description{ 8 | Contains the task (\code{mtcars.task}). 
9 | } 10 | \references{ 11 | See \link[datasets:mtcars]{datasets::mtcars}. 12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/phoneme.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{phoneme.task} 5 | \alias{phoneme.task} 6 | \title{Phoneme functional data multilabel classification task.} 7 | \description{ 8 | Contains the task (\code{phoneme.task}). 9 | The task contains a single functional covariate and 5 equally big classes (aa, ao, dcl, iy, sh). 10 | The aim is to predict the class of the phoneme in the functional. 11 | The dataset is contained in the package fda.usc. 12 | } 13 | \references{ 14 | F. Ferraty and P. Vieu (2003) "Curve discrimination: a nonparametric functional approach", Computational Statistics and Data Analysis, 44(1-2), 161-173. 15 | F. Ferraty and P. Vieu (2006) Nonparametric functional data analysis, New York: Springer. 16 | T. Hastie and R. Tibshirani and J. Friedman (2009) The elements of statistical learning: Data mining, inference and prediction, 2nd edn, New York: Springer. 17 | } 18 | \keyword{data} 19 | -------------------------------------------------------------------------------- /man/pid.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{pid.task} 5 | \alias{pid.task} 6 | \title{PimaIndiansDiabetes classification task.} 7 | \description{ 8 | Contains the task (\code{pid.task}). 9 | } 10 | \references{ 11 | See \link[mlbench:PimaIndiansDiabetes]{mlbench::PimaIndiansDiabetes}. 12 | Note that this is the uncorrected version from mlbench. 13 | } 14 | \keyword{data} 15 | -------------------------------------------------------------------------------- /man/reextractFDAFeatures.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/extractFDAFeatures.R 3 | \name{reextractFDAFeatures} 4 | \alias{reextractFDAFeatures} 5 | \title{Re-extract features from a data set} 6 | \usage{ 7 | reextractFDAFeatures(obj, desc, ...) 8 | } 9 | \arguments{ 10 | \item{obj}{(\link{Task} | \link{data.frame})\cr 11 | Task or data.frame to extract functional features from. Must contain functional features 12 | as matrix columns.} 13 | 14 | \item{desc}{(\code{extractFDAFeatDesc})\cr 15 | FDAFeature extraction description as returned by \link{extractFDAFeatures}} 16 | 17 | \item{...}{(any)\cr 18 | Further args passed on to methods.} 19 | } 20 | \value{ 21 | \link{data.frame} or \link{Task} containing the extracted Features 22 | } 23 | \description{ 24 | This function accepts a data frame or a task and an extractFDAFeatDesc 25 | (a FDA feature extraction description) 26 | as returned by \link{extractFDAFeatures} to extract features 27 | from previously unseen data. 
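The page above describes a two-step pattern: extractFDAFeatures returns a description object that reextractFDAFeatures then applies to new data. The sketch below illustrates that pattern only; the feat.methods argument, the extractFDAFourier() constructor, and the "UVVIS" column name are assumptions for illustration and are not taken from this page:

library(mlr)
# Assumed API: extract Fourier features from one functional column of fuelsubset.task,
# then re-apply the stored extraction description (here, to the same data).
extracted = extractFDAFeatures(fuelsubset.task,
  feat.methods = list("UVVIS" = extractFDAFourier()))
reextractFDAFeatures(fuelsubset.task, extracted$desc)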
28 | } 29 | \concept{extractFDAFeatures} 30 | -------------------------------------------------------------------------------- /man/setHyperPars2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/setHyperPars.R 3 | \name{setHyperPars2} 4 | \alias{setHyperPars2} 5 | \title{Only exported for internal use.} 6 | \usage{ 7 | setHyperPars2(learner, par.vals) 8 | } 9 | \arguments{ 10 | \item{learner}{(\link{Learner})\cr 11 | The learner.} 12 | 13 | \item{par.vals}{(\link{list})\cr 14 | List of named (hyper)parameter settings.} 15 | } 16 | \description{ 17 | Only exported for internal use. 18 | } 19 | -------------------------------------------------------------------------------- /man/simplifyMeasureNames.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/simplifyMeasureNames.R 3 | \name{simplifyMeasureNames} 4 | \alias{simplifyMeasureNames} 5 | \title{Simplify measure names.} 6 | \usage{ 7 | simplifyMeasureNames(xs) 8 | } 9 | \arguments{ 10 | \item{xs}{(\link{character})\cr 11 | Character vector that (possibly) contains aggregated measure names.} 12 | } 13 | \value{ 14 | (\link{character}). 15 | } 16 | \description{ 17 | Clips aggregation names from a character vector. 18 | E.g.: 'mmce.test.mean' becomes 'mmce'. 19 | Elements that don't contain a measure name are ignored and returned unchanged. 20 | } 21 | -------------------------------------------------------------------------------- /man/sonar.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{sonar.task} 5 | \alias{sonar.task} 6 | \title{Sonar classification task.} 7 | \description{ 8 | Contains the task (\code{sonar.task}). 9 | } 10 | \references{ 11 | See \link[mlbench:Sonar]{mlbench::Sonar}. 12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/spam.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{spam.task} 5 | \alias{spam.task} 6 | \title{Spam classification task.} 7 | \description{ 8 | Contains the task (\code{spam.task}). 9 | } 10 | \references{ 11 | See \link[kernlab:spam]{kernlab::spam}. 12 | } 13 | \keyword{data} 14 | -------------------------------------------------------------------------------- /man/wpbc.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{wpbc.task} 5 | \alias{wpbc.task} 6 | \title{Wisconsin Prognostic Breast Cancer (WPBC) survival task.} 7 | \description{ 8 | Contains the task (\code{wpbc.task}). 9 | } 10 | \references{ 11 | See \link[TH.data:wpbc]{TH.data::wpbc}. 12 | Incomplete cases have been removed from the task.
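simplifyMeasureNames, documented in this block, strips aggregation suffixes and leaves everything else untouched; a one-line illustration:

library(mlr)
simplifyMeasureNames(c("mmce.test.mean", "acc", "not.a.measure"))
# -> "mmce" "acc" "not.a.measure"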
13 | } 14 | \keyword{data} 15 | -------------------------------------------------------------------------------- /man/yeast.task.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \docType{data} 4 | \name{yeast.task} 5 | \alias{yeast.task} 6 | \title{Yeast multilabel classification task.} 7 | \source{ 8 | \url{https://archive.ics.uci.edu/ml/datasets/Yeast} (In long instead of wide format) 9 | } 10 | \description{ 11 | Contains the task (\code{yeast.task}). 12 | } 13 | \references{ 14 | Elisseeff, A., & Weston, J. (2001): 15 | A kernel method for multi-labelled classification. 16 | In Advances in neural information processing systems (pp. 681-687). 17 | } 18 | \keyword{data} 19 | -------------------------------------------------------------------------------- /mlr.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageCheckArgs: --as-cran --no-tests 22 | PackageRoxygenize: rd,collate,namespace 23 | -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon-120x120.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-152x152.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon-152x152.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon-180x180.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon-60x60.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon-76x76.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/apple-touch-icon.png 
-------------------------------------------------------------------------------- /pkgdown/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/pkgdown/favicon/favicon.ico -------------------------------------------------------------------------------- /src/Makevars.win: -------------------------------------------------------------------------------- 1 | PKG_CPPFLAGS += -std=gnu99 2 | -------------------------------------------------------------------------------- /src/init.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // for NULL 4 | #include 5 | 6 | /* .Call calls */ 7 | extern SEXP c_smote(SEXP, SEXP, SEXP, SEXP); 8 | 9 | static const R_CallMethodDef CallEntries[] = { 10 | {"c_smote", (DL_FUNC) &c_smote, 4}, 11 | {NULL, NULL, 0} 12 | }; 13 | 14 | void R_init_mlr(DllInfo *dll) { 15 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 16 | R_useDynamicSymbols(dll, FALSE); 17 | } 18 | -------------------------------------------------------------------------------- /src/macros.h: -------------------------------------------------------------------------------- 1 | #ifndef FOO_MACROS_H 2 | #define FOO_MACROS_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | #define UNPACK_REAL_VECTOR(S, D, N) \ 10 | double *D = REAL(S); \ 11 | const R_len_t N = length(S); 12 | 13 | #define UNPACK_REAL_MATRIX(S, D, N, K) \ 14 | double *D = REAL(S); \ 15 | const R_len_t N = nrows(S); \ 16 | const R_len_t K = ncols(S); 17 | 18 | #define UNPACK_INT_MATRIX(S, D, N, K) \ 19 | int *D = INTEGER(S); \ 20 | const R_len_t N = nrows(S); \ 21 | const R_len_t K = ncols(S); 22 | 23 | #define UNPACK_REAL_MATRIX_2(S, D, N) \ 24 | double *D = REAL(S); \ 25 | const R_len_t N = nrows(S); 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /tests/figs/deps.txt: -------------------------------------------------------------------------------- 1 | - vdiffr-svg-engine: 1.0 2 | - vdiffr: 0.3.1.9000 3 | - freetypeharfbuzz: 0.2.5 4 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | if (identical(Sys.getenv("NOT_CRAN"), "true")) { 3 | set.seed(getOption("mlr.debug.seed")) 4 | test_check("mlr") 5 | } 6 | -------------------------------------------------------------------------------- /tests/testthat/helper_funs.R: -------------------------------------------------------------------------------- 1 | boosting_helper1 = function(formula, data, subset = seq_len(nrow(data)), ...) { 2 | args = list(...) 
3 | if (!is.null(args$cp)) { 4 | ctrl = rpart::rpart.control(cp = args$cp, xval = 0) 5 | } else { 6 | ctrl = rpart::rpart.control(xval = 0) 7 | } 8 | set.seed(getOption("mlr.debug.seed")) 9 | adabag::boosting(formula, data[subset, ], mfinal = args$mfinal, control = ctrl) 10 | } 11 | 12 | boosting_helper2 = function(model, newdata) { 13 | set.seed(getOption("mlr.debug.seed")) 14 | as.factor(predict(model, newdata)$class) 15 | } 16 | -------------------------------------------------------------------------------- /tests/testthat/helper_zzz.R: -------------------------------------------------------------------------------- 1 | set.seed(123) 2 | options(mlr.debug.seed = 123L) 3 | options(datatable.rbindlist.check = "error") 4 | configureMlr(show.info = FALSE, show.learner.output = FALSE) 5 | 6 | library(checkmate) 7 | -------------------------------------------------------------------------------- /tests/testthat/test_base_DummyFeaturesWrapper.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("DummyFeaturesWrapper", { 3 | methods = c("1-of-n", "reference") 4 | 5 | for (m in methods) { 6 | lrn = makeLearner("classif.ksvm") 7 | lrn.w = makeDummyFeaturesWrapper(lrn, method = m) 8 | bc.task.dummy = createDummyFeatures(bc.task, method = m) 9 | 10 | # check if predict works 11 | mod = train(lrn, bc.task.dummy, subset = 1:400) 12 | mod.w = train(lrn.w, bc.task, subset = 1:400) 13 | expect_equal(getLearnerModel(mod.w)$features, getTaskFeatureNames(bc.task.dummy)) 14 | expect_equal(getLearnerModel(mod.w)$features, attr(getLearnerModel(mod)@terms, "term.labels")) 15 | 16 | # check if predict works 17 | pred = predict(mod, bc.task.dummy, subset = 401:getTaskSize(bc.task)) 18 | pred.w = predict(mod.w, bc.task, subset = 401:getTaskSize(bc.task)) 19 | expect_equal(getPredictionResponse(pred), getPredictionResponse(pred.w)) 20 | } 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_base_ModelMultiplexer.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("ModelMultiplexer inherits properties", { 3 | bls = list( 4 | makeLearner("classif.lda"), 5 | makeLearner("classif.randomForest") 6 | ) 7 | lrn = makeModelMultiplexer(bls) 8 | expect_set_equal(getLearnerProperties(lrn), getLearnerProperties(bls[[1L]])) 9 | 10 | lrn = setHyperPars(lrn, selected.learner = "classif.randomForest") 11 | expect_set_equal(getLearnerProperties(lrn), getLearnerProperties(bls[[2L]])) 12 | }) 13 | -------------------------------------------------------------------------------- /tests/testthat/test_base_aggregations.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("aggregations", { 3 | ms = list(mmce, acc, tp, fp, tn, fn, tpr, fpr, tnr, fnr, ppv, npv, mcc, f1, auc) 4 | rdesc = makeResampleDesc("CV", iters = 2) 5 | lrn = makeLearner("classif.rpart", predict.type = "prob") 6 | r = resample(lrn, task = binaryclass.task, resampling = rdesc, measures = ms) 7 | a = r$aggr 8 | expect_equal(length(a), length(ms)) 9 | expect_true(!any(is.na(as.logical(a)))) 10 | }) 11 | 12 | test_that("testgroup.mean", { 13 | perf.test = 1:4 14 | group = c(1, 1, 2, 2) 15 | 16 | expect_equal(testgroup.mean$fun(NA, perf.test, NA, mean, group, NA), mean(c(mean(1:2), mean(3:4)))) 17 | }) 18 | 19 | test_that("testgroup.sd", { 20 | perf.test = 1:10 21 | group = c(rep(1, 5), rep(2, 5)) 22 | 23 | expect_equal(testgroup.sd$fun(NA, perf.test, NA, mean, 
group, NA), sd(c(mean(1:5), mean(6:10)))) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_base_chains.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("chains", { 3 | lrn1 = makeLearner("classif.rpart", minsplit = 10) 4 | lrn4 = makeFilterWrapper(lrn1, fw.perc = 0.5) 5 | 6 | m = train(lrn4, multiclass.task) 7 | 8 | p = predict(m, multiclass.task) 9 | perf = performance(p, mmce) 10 | expect_true(perf < 0.1) 11 | 12 | outer = makeResampleDesc("Holdout") 13 | inner = makeResampleDesc("CV", iters = 2) 14 | 15 | ps = makeParamSet( 16 | makeDiscreteParam(id = "minsplit", values = c(5, 10)), 17 | makeDiscreteParam(id = "fw.perc", values = c(0.8, 1)) 18 | ) 19 | 20 | lrn5 = makeTuneWrapper(lrn4, resampling = inner, par.set = ps, 21 | control = makeTuneControlGrid()) 22 | m = train(lrn5, task = multiclass.task) 23 | p = predict(m, task = multiclass.task) 24 | or = m$learner.model$opt.result 25 | expect_equal(length(or$x), 2) 26 | expect_equal(getOptPathLength(or$opt.path), 2 * 2) 27 | 28 | perf = performance(p, mmce) 29 | expect_true(perf < 0.1) 30 | }) 31 | -------------------------------------------------------------------------------- /tests/testthat/test_base_checkTaskLearner.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("checkTaskLearner", { 3 | df = multiclass.df 4 | df[1, 1] = NA 5 | task = makeClassifTask(data = df, target = multiclass.target) 6 | expect_error(train(makeLearner("classif.lda"), task), "missing values") 7 | expect_error(train(makeLearner("regr.km"), regr.task), "factor inputs") 8 | expect_error(train(makeLearner("classif.gbm"), regr.task), "is for 'classif'") 9 | expect_error(train(makeLearner("regr.gbm"), multiclass.task), "is for 'regr'") 10 | }) 11 | -------------------------------------------------------------------------------- /tests/testthat/test_base_checkTaskSubset.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("checkTaskSubset", { 3 | expect_equal(1:50, checkTaskSubset(1:50, size = 50)) 4 | subs20 = sample.int(50, 20) 5 | expect_equal(subs20, checkTaskSubset(subs20, size = 50)) 6 | 7 | subs.bool = sample(c(TRUE, FALSE), size = 50, replace = TRUE) 8 | expect_equal(which(subs.bool), checkTaskSubset(subs.bool, size = 50)) 9 | expect_error(checkTaskSubset(subs20, size = 10), regexp = "<= 10") 10 | 11 | # oversampling is allowed 12 | subs50 = sample.int(20, 50, replace = TRUE) 13 | expect_equal(subs50, checkTaskSubset(subs50, size = 20)) 14 | }) 15 | -------------------------------------------------------------------------------- /tests/testthat/test_base_configureMlr.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("getOptions and configureMlr", { 3 | mlr.options = getMlrOptions() 4 | expect_equal(length(mlr.options), 8L) 5 | 6 | configureMlr(on.learner.error = "quiet") 7 | expect_equal(getMlrOptions()$on.learner.error, "quiet") 8 | 9 | do.call(configureMlr, mlr.options) 10 | expect_equal(getMlrOptions(), mlr.options) 11 | }) 12 | -------------------------------------------------------------------------------- /tests/testthat/test_base_convertMLBenchObjToTask.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("convertMLbenchObjToTask", { 3 | suppressMessages(requirePackagesOrSkip("mlbench")) 4 | # get all mlbench.* functions, 1spiral does 
not work 5 | fs = ls("package:mlbench", pattern = "mlbench") 6 | n = 77L 7 | for (f in setdiff(fs, "mlbench.1spiral")) { 8 | task = convertMLBenchObjToTask(f, n = n) 9 | expect_s3_class(task, "Task") 10 | # for some, n is not properly respected in mlbench 11 | if (f %nin% c("mlbench.corners", "mlbench.hypercube", "mlbench.simplex")) { 12 | expect_equal(getTaskSize(task), n) 13 | } 14 | } 15 | 16 | # get all mlbench datasets, HouseVotes84 and Ozone have NAs in target col 17 | ds = data(package = "mlbench") 18 | ds = ds$results[, "Item"] 19 | for (d in setdiff(ds, c("HouseVotes84", "Ozone"))) { 20 | task = convertMLBenchObjToTask(d, n = n) 21 | expect_s3_class(task, "Task") 22 | expect_equal(getTaskId(task), d) 23 | } 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_base_costs.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("costs", { 3 | lrn = makeLearner("classif.rpart") 4 | rdesc = makeResampleDesc("Holdout") 5 | task = binaryclass.task 6 | task$task.desc$positive = "M" 7 | 8 | cc = 1 - diag(1, 2) 9 | rownames(cc) = colnames(cc) = getTaskClassLevels(task) 10 | ms = makeCostMeasure(costs = cc) 11 | r = resample(lrn, rdesc, task = task, measures = list(mmce, ms)) 12 | expect_equal(r$aggr[[1]], r$aggr[[2]]) 13 | 14 | cc = matrix(0, 2, 2) 15 | rownames(cc) = colnames(cc) = getTaskClassLevels(task) 16 | cc["R", "M"] = 1 17 | ms = makeCostMeasure(id = "foo", costs = cc, combine = sum) 18 | expect_equal(ms$id, "foo") 19 | r = resample(lrn, rdesc, task = task, measures = list(fp, ms)) 20 | expect_equal(r$aggr[[1]], r$aggr[[2]]) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_base_createSpatialResamplingPlots.R: -------------------------------------------------------------------------------- 1 | test_that("test createSpatialResamplingPlots() creates 10 ggplot objects", { 2 | 3 | # take more reps to see if the restriction on two reps works 4 | rdesc = makeResampleDesc("SpRepCV", folds = 5, reps = 3) 5 | r = resample(makeLearner("classif.qda"), spatial.task, rdesc) 6 | 7 | plots = createSpatialResamplingPlots(spatial.task, r, crs = 32717, 8 | repetitions = 2, x.axis.breaks = c(-79.065, -79.085), 9 | y.axis.breaks = c(-3.970, -4)) 10 | 11 | expect_class(plots[[1]][[1]], "gg") 12 | expect_length(plots[[1]], 10) 13 | }) 14 | -------------------------------------------------------------------------------- /tests/testthat/test_base_dropFeatures.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("dropFeatures", { 3 | fns = getTaskFeatureNames(multiclass.task) 4 | task2 = dropFeatures(multiclass.task, fns[1]) 5 | expect_equal(length(getTaskFeatureNames(task2)), 3L) 6 | }) 7 | -------------------------------------------------------------------------------- /tests/testthat/test_base_estimateResidualVariance.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("estimateResidualVariance", { 3 | set.seed(getOption("mlr.debug.seed")) 4 | task = regr.task 5 | lrn = makeLearner("regr.lm") 6 | x1 = estimateResidualVariance(lrn, task) 7 | x2 = estimateResidualVariance(lrn, data = regr.df, target = regr.target) 8 | expect_equal(round(x1, 4), 21.9382) 9 | expect_equal(x1, x2) 10 | }) 11 | -------------------------------------------------------------------------------- /tests/testthat/test_base_generateFeatureImportanceData.R: 
-------------------------------------------------------------------------------- 1 | test_that("generateFeatureImportanceData", { 2 | regr.imp = generateFeatureImportanceData(regr.task, "permutation.importance", 3 | "regr.rpart", c("lstat", "crim"), FALSE, mse, 4 | function(x, y) abs(x - y), median, 1L, TRUE, FALSE) 5 | expect_equal(colnames(regr.imp$res), c("lstat", "crim")) 6 | expect_equal(dim(regr.imp$res), c(1, 2)) 7 | 8 | classif.imp = generateFeatureImportanceData(multiclass.task, "permutation.importance", 9 | "classif.rpart", c("Petal.Width", "Petal.Length"), TRUE, ber, nmc = 1L, local = TRUE) 10 | expect_equal(colnames(classif.imp$res), stri_paste("Petal.Width", "Petal.Length", sep = ":")) 11 | expect_equal(dim(classif.imp$res), c(getTaskSize(multiclass.task), 1)) 12 | 13 | # Test printer 14 | expect_output(print(classif.imp), regexp = "FeatureImportance:") 15 | }) 16 | -------------------------------------------------------------------------------- /tests/testthat/test_base_getOOBPreds.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("getOOBPreds", { 3 | lrns = list( 4 | makeLearner("classif.randomForest"), 5 | makeFilterWrapper(learner = "classif.randomForest", 6 | fw.method = "FSelectorRcpp_information.gain", 7 | fw.abs = 2)) 8 | 9 | task = subsetTask(binaryclass.task, subset = c(10:20, 180:190), 10 | features = getTaskFeatureNames(binaryclass.task)[12:15]) 11 | 12 | for (lrn in lrns) { 13 | mod = train(lrn, task) 14 | oob = getOOBPreds(mod, task) 15 | pred = predict(mod, task) 16 | expect_true(is.numeric(performance(oob, measures = list(acc)))) 17 | expect_equal(dim(oob$data), dim(pred$data)) 18 | expect_equal(names(oob$data), names(pred$data)) 19 | expect_equal(names(oob), names(pred)) 20 | } 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_base_getParamSet.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("getParamSet", { 3 | lrn = makeLearner("classif.lda") 4 | ps = getParamSet(lrn) 5 | expect_true(setequal(names(ps$pars), c("method", "nu", "tol", "predict.method", "CV", "prior"))) 6 | 7 | lrn = makeFilterWrapper(lrn) 8 | ps = getParamSet(lrn) 9 | expect_true(all(c("method", "fw.method") %in% names(ps$pars))) 10 | 11 | lrn = makeModelMultiplexer(list(setLearnerId(lrn, "x"))) 12 | ps = getParamSet(lrn) 13 | expect_true(all(c("x.method", "x.fw.method", "selected.learner") %in% names(ps$pars))) 14 | 15 | expect_true("type" %in% names(getParamSet("classif.ksvm")$pars)) 16 | }) 17 | -------------------------------------------------------------------------------- /tests/testthat/test_base_makeLearners.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("makeLearners", { 3 | cls1 = c("classif.rpart", "classif.lda") 4 | cls2 = c("rpart", "lda") 5 | lrns1 = setNames(lapply(cls1, makeLearner), cls1) 6 | ids = c("a", "b") 7 | lrns1 = setNames(lapply(cls1, makeLearner), cls1) 8 | lrns2 = setNames(mapply(setLearnerId, lrns1, ids, SIMPLIFY = FALSE), ids) 9 | lrns3 = lapply(lrns1, setPredictType, predict.type = "prob") 10 | 11 | res = makeLearners(cls1) 12 | expect_equal(res, lrns1) 13 | res = makeLearners(cls1, ids = ids) 14 | expect_equal(res, lrns2) 15 | res = makeLearners(cls2, type = "classif") 16 | expect_equal(res, lrns1) 17 | res = makeLearners(cls2, type = "classif", predict.type = "prob") 18 | expect_equal(res, lrns3) 19 | }) 20 | 
-------------------------------------------------------------------------------- /tests/testthat/test_base_orderedfactors.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("ordered factors", { 3 | data(BreastCancer, package = "mlbench") 4 | 5 | df = na.omit(BreastCancer) 6 | df$Id = NULL 7 | task = makeClassifTask(id = "BreastCancer", data = df, target = "Class") 8 | 9 | expect_equal(getTaskDesc(task)$n.feat, c(numerics = 0L, factors = 4L, 10 | ordered = 5L, functionals = 0L)) 11 | expect_equal(getTaskNFeats(task), 9L) 12 | 13 | expect_error(train("classif.lda", task), "has ordered factor") 14 | z = holdout("classif.rpart", task) 15 | expect_true(!is.na(z$aggr)) 16 | }) 17 | -------------------------------------------------------------------------------- /tests/testthat/test_base_resample_b632.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("b632", { 3 | res = makeResampleDesc("Bootstrap", iters = 2, predict = "both") 4 | m = setAggregation(mmce, b632) 5 | r = resample(makeLearner("classif.rpart"), task = binaryclass.task, 6 | resampling = res, measure = m) 7 | m1 = r$measures.train 8 | m2 = r$measures.test 9 | p = as.data.frame(r$pred) 10 | ls11 = p[p$set == "train" & p$iter == 1, c("truth", "response")] 11 | ls12 = p[p$set == "test" & p$iter == 1, c("truth", "response")] 12 | ls1 = 0.368 * mean(ls11[, 1] != ls11[, 2]) + 0.632 * mean(ls12[, 1] != ls12[, 2]) 13 | ls21 = p[p$set == "train" & p$iter == 2, c("truth", "response")] 14 | ls22 = p[p$set == "test" & p$iter == 2, c("truth", "response")] 15 | ls2 = 0.368 * mean(ls21[, 1] != ls21[, 2]) + 0.632 * mean(ls22[, 1] != ls22[, 2]) 16 | ag = r$aggr 17 | expect_equal(mean(c(ls1, ls2)), ag[["mmce.b632"]]) 18 | }) 19 | -------------------------------------------------------------------------------- /tests/testthat/test_base_resample_b632plus.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("b632+", { 3 | res = makeResampleDesc("Bootstrap", iters = 3, predict = "both") 4 | m = setAggregation(mmce, b632plus) 5 | r = resample(makeLearner("classif.lda"), multiclass.task, res, measures = m) 6 | x = r$aggr[["mmce.b632plus"]] 7 | expect_true(is.numeric(x) & length(x) == 1 && !is.na(x)) 8 | }) 9 | -------------------------------------------------------------------------------- /tests/testthat/test_base_resample_convenience.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("resample convenience functions", { 3 | mycheck = function(r) { 4 | expect_true(all(!is.na(r$aggr))) 5 | } 6 | 7 | r = holdout("classif.rpart", multiclass.task) 8 | mycheck(r) 9 | 10 | r = subsample("classif.rpart", multiclass.task, iters = 1L, split = 0.2, 11 | minsplit = 50L, models = TRUE) 12 | mycheck(r) 13 | expect_equal(r$models[[1L]]$learner.model$control$minsplit, 50L) 14 | 15 | lrn = makeLearner("classif.rpart") 16 | r = crossval(lrn, multiclass.task, iters = 2L) 17 | mycheck(r) 18 | 19 | r = repcv("classif.rpart", multiclass.task, folds = 2L, reps = 2L, stratify = TRUE) 20 | mycheck(r) 21 | 22 | r = bootstrapOOB("classif.rpart", multiclass.task, iters = 1L) 23 | mycheck(r) 24 | r = bootstrapB632("classif.rpart", multiclass.task, iters = 1L) 25 | mycheck(r) 26 | r = bootstrapB632plus("classif.rpart", multiclass.task, iters = 1L) 27 | mycheck(r) 28 | }) 29 | -------------------------------------------------------------------------------- 
/tests/testthat/test_base_resample_loo.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("loo instance works", { 3 | rin = makeResampleInstance(makeResampleDesc("LOO"), size = 10) 4 | 5 | iters = rin$desc$iters 6 | expect_equal(iters, 10) 7 | 8 | for (i in 1:iters) { 9 | i1 = rin$train.inds[[i]] 10 | i2 = rin$test.inds[[i]] 11 | expect_true(min(i1) >= 1) 12 | expect_true(max(i1) <= 10) 13 | expect_true(min(i2) >= 1) 14 | expect_true(max(i2) <= 10) 15 | expect_equal(length(i1), 9) 16 | expect_equal(length(i2), 1) 17 | expect_equal(sort(c(unique(i1), i2)), 1:10) 18 | } 19 | }) 20 | -------------------------------------------------------------------------------- /tests/testthat/test_base_resample_makeResampleDesc.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("makeResampleDesc", { 3 | desc1 = makeResampleDesc("CV", predict = "test", iters = 2) 4 | expect_equal(desc1$iters, 2) 5 | expect_equal(desc1$predict, "test") 6 | expect_error(makeResampleDesc("Foo", predict = "test", iters = 2), 7 | "Assertion on 'method' failed: Must be element of set") 8 | expect_error(makeResampleDesc("CV", predict = "Foo", iters = 2), 9 | "Assertion on 'predict' failed: Must be element of set") 10 | }) 11 | -------------------------------------------------------------------------------- /tests/testthat/test_base_resample_weights.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("resample works with weights", { 3 | task = makeClassifTask(data = iris, target = "Species", weights = as.integer(iris$Species)) 4 | res = resample(task = task, learner = "classif.rpart", resampling = makeResampleDesc("CV", iters = 2)) 5 | expect_s3_class(res$pred, "ResamplePrediction") 6 | }) 7 | -------------------------------------------------------------------------------- /tests/testthat/test_base_simplifyMeasureNames.R: -------------------------------------------------------------------------------- 1 | test_that("simplifyMeasureNames", { 2 | # set up some measures, get ids and aggregated names 3 | meas = list(mmce, acc, ber) 4 | meas.aggr = vcapply(meas, measureAggrName) 5 | meas.ids = extractSubList(meas, "id") 6 | # some dummy-strings not representing measures 7 | no.meas = c("abc", "def") 8 | # join aggr.names and dummy entries together 9 | xs = c(meas.aggr, no.meas) 10 | # test that aggr names get clipped and dummies are unchanged 11 | expected = c(meas.ids, no.meas) 12 | expect_equal(expected, simplifyMeasureNames(xs)) 13 | 14 | # check measure ids are ignored too 15 | xs = c("acc", "no measure") 16 | expect_equal(xs, simplifyMeasureNames(xs)) 17 | 18 | # check inputs of length 0 19 | xs = character(0L) 20 | expect_equal(xs, simplifyMeasureNames(xs)) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_base_summarizeColumns.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("summarizeColumns", { 3 | d = data.frame(x = 1:5, y = c("a", "b", "c", "d", "e"), 4 | z = c(TRUE, TRUE, TRUE, FALSE, FALSE), stringsAsFactors = FALSE) 5 | 6 | s = summarizeColumns(d) 7 | expect_equal(dim(s), c(ncol(d), 10L)) 8 | expect_equal(s$na, c(0, 0, 0)) 9 | expect_equal(s$mean, c(3, NA, NA)) 10 | 11 | s = summarizeColumns(iris.task) 12 | 13 | d = iris 14 | d[1L, 1L] = NA_real_ 15 | s = summarizeColumns(d) 16 | expect_equal(s$na, c(1, 0, 0, 0, 0)) 17 | expect_false(is.na(s[1L, "mean"])) 18 |
expect_false(is.na(s[1L, "disp"])) 19 | expect_false(is.na(s[1L, "mad"])) 20 | expect_false(is.na(s[1L, "median"])) 21 | expect_equal(s[5L, "min"], 50L) 22 | expect_equal(s[5L, "max"], 50L) 23 | d = iris 24 | d[1L, 5L] = NA_real_ 25 | s = summarizeColumns(d) 26 | expect_equal(s$na, c(0, 0, 0, 0, 1)) 27 | expect_equal(s[5L, "min"], 49L) 28 | expect_equal(s[5L, "max"], 50L) 29 | }) 30 | -------------------------------------------------------------------------------- /tests/testthat/test_base_summarizeLevels.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("summarizeLevels", { 3 | data = data.frame(num = as.numeric(1:6), fac = as.factor(1:6), int = as.integer(1:6)) 4 | data2 = data.frame(cha = as.character(1:6), cha2 = 3:8) 5 | data3 = data.frame(fac = as.factor(c(rep(1, 1000), rep(2, 100000))), fac2 = as.integer(rep(4, 101000))) 6 | data4 = data.frame(fac1 = as.factor(1)) 7 | 8 | expect_error(summarizeLevels(data, "num")) 9 | expect_equal(length(summarizeLevels(data)), 1L) 10 | expect_equal(length(summarizeLevels(data, "fac")), 1L) 11 | expect_equal(as.numeric(summarizeLevels(data)[[1]]), rep(1, 6)) 12 | expect_equal(as.numeric(summarizeLevels(data2)[[1]]), rep(1, 6)) 13 | expect_equal(as.numeric(summarizeLevels(data3)[[1]]), c(1000, 100000)) 14 | expect_equal(as.numeric(summarizeLevels(data4)[[1]]), 1) 15 | }) 16 | -------------------------------------------------------------------------------- /tests/testthat/test_base_train.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("train works with subset", { 3 | subs = 1:5 4 | mod = train("classif.rpart", binaryclass.task, subset = subs) 5 | expect_equal(mod$subset, subs) 6 | expect_equal(length(mod$learner.model$y), length(subs)) 7 | 8 | subs = c(TRUE, FALSE, TRUE, TRUE) 9 | mod = train("classif.rpart", binaryclass.task, subset = subs) 10 | expect_equal(mod$subset, c(1L, 3L, 4L)) 11 | expect_equal(length(mod$learner.model$y), sum(subs)) 12 | }) 13 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_clusterSVM.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_clusterSVM", { 3 | requirePackagesOrSkip("SwarmSVM", default.method = "load") 4 | 5 | parset.list1 = list( 6 | list(centers = 2), 7 | list(centers = 3, seed = 0) 8 | ) 9 | 10 | parset.list2 = list( 11 | list(), 12 | list(centers = 3, seed = 0) 13 | ) 14 | 15 | old.predicts.list = list() 16 | 17 | for (i in seq_along(parset.list1)) { 18 | parset = parset.list1[[i]] 19 | pars = list(data.matrix(binaryclass.train[, -61]), y = binaryclass.train[, 61]) 20 | pars = c(pars, parset) 21 | set.seed(getOption("mlr.debug.seed")) 22 | m = suppressMessages(do.call(SwarmSVM::clusterSVM, pars)) 23 | old.predicts.list[[i]] = predict(m, data.matrix(binaryclass.test[, -61]))$predictions 24 | } 25 | 26 | testSimpleParsets("classif.clusterSVM", binaryclass.df, binaryclass.target, 27 | binaryclass.train.inds, old.predicts.list, parset.list2) 28 | }) 29 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_evtree.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_evtree", { 3 | requirePackagesOrSkip("evtree", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(maxdepth = 2), 8 | list(ntrees = 200) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 
old.probs.list = list() 13 | 14 | for (i in seq_along(parset.list)) { 15 | parset = parset.list[[i]] 16 | pars = list(formula = binaryclass.formula, data = binaryclass.train) 17 | pars = c(pars, parset) 18 | set.seed(getOption("mlr.debug.seed")) 19 | m = do.call(evtree::evtree, pars) 20 | old.predicts.list[[i]] = predict(m, newdata = binaryclass.test) 21 | p = predict(m, newdata = binaryclass.test, type = "prob") 22 | old.probs.list[[i]] = p[, 1] 23 | } 24 | 25 | testSimpleParsets("classif.evtree", binaryclass.df, binaryclass.target, 26 | binaryclass.train.inds, old.predicts.list, parset.list) 27 | testProbParsets("classif.evtree", binaryclass.df, binaryclass.target, 28 | binaryclass.train.inds, old.probs.list, parset.list) 29 | }) 30 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_fgam.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("fgam works for classification", { 3 | # errors on R 4.1 4 | skip_if(sessionInfo()$R.version$status == "Under development (unstable)") 5 | requirePackagesOrSkip("refund") 6 | dd = getTaskData(gunpoint.task, functionals.as = "matrix", target.extra = TRUE) 7 | matdd = list() 8 | matdd$fd = dd$data$fd 9 | hh = getBinomialTarget(gunpoint.task) 10 | matdd$X1 = hh$newtarget 11 | fit.af = pfr(formula = X1 ~ af(fd, Qtransform = TRUE, k = 3, m = 2), data = matdd, family = binomial()) 12 | lrn = makeLearner("classif.fgam", par.vals = list(mgcv.te_ti.k = 3L, mgcv.te_ti.m = 2)) 13 | m = train(lrn, gunpoint.task) 14 | cp = predict(m, task = gunpoint.task) 15 | expect_class(cp, "Prediction") 16 | 17 | # prob output 18 | lrn = makeLearner("classif.fgam", par.vals = list(mgcv.te_ti.k = 3L, mgcv.te_ti.m = 2), predict.type = "prob") 19 | m2 = train(lrn, gunpoint.task) 20 | cp2 = predict(m2, task = gunpoint.task) 21 | expect_class(cp2, "Prediction") 22 | }) 23 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_gaterSVM.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_gaterSVM", { 3 | requirePackagesOrSkip("SwarmSVM", default.method = "load") 4 | 5 | # Early Prediction 6 | model = SwarmSVM::gaterSVM(x = data.matrix(binaryclass.train[, -61]), 7 | y = binaryclass.train[, 61], m = 2, max.iter = 1, seed = 0) 8 | p = predict(model, data.matrix(binaryclass.test[, -61])) 9 | p = factor(p, labels = levels(binaryclass.train[, 61])) 10 | 11 | testSimple("classif.gaterSVM", binaryclass.df, binaryclass.target, 12 | binaryclass.train.inds, p, parset = list(m = 2, max.iter = 1, seed = 0)) 13 | 14 | # Prediction result containing only one class 15 | data = data.frame(a = c(1, 2, 1, 2), b = c(1, 1, 2, 2), c = c("a", "b", "a", "b")) 16 | traintask = makeClassifTask("train", data, "c") 17 | testtask = makeClusterTask("test", data.frame(a = c(1, 1), b = c(1, 1))) 18 | x = train(makeLearner("classif.gaterSVM", m = 2, seed = 0), traintask) 19 | result = predict(x, testtask)$data$response 20 | expect_equal(as.character(result), c("a", "a")) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_lda.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_lda", { 3 | requirePackagesOrSkip("MASS", default.method = "load") 4 | 5 | m = MASS::lda(formula = multiclass.formula, data = multiclass.train) 6 | p = predict(m, newdata = multiclass.test) 7 | 8
| testSimple("classif.lda", multiclass.df, multiclass.target, multiclass.train.inds, p$class) 9 | testProb("classif.lda", multiclass.df, multiclass.target, multiclass.train.inds, p$posterior) 10 | 11 | tt = MASS::lda 12 | tp = function(model, newdata) predict(model, newdata)$class 13 | 14 | testCV("classif.lda", multiclass.df, multiclass.target, tune.train = tt, tune.predict = tp) 15 | }) 16 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_logreg.R: -------------------------------------------------------------------------------- 1 | test_that("classif_logreg", { 2 | # "did not converge": 3 | m = suppressWarnings(glm(formula = binaryclass.formula, data = binaryclass.train, family = binomial)) 4 | 5 | p = predict(m, newdata = binaryclass.test, type = "response") 6 | p.prob = 1 - p 7 | p.class = as.factor(binaryclass.class.levs[ifelse(p > 0.5, 2, 1)]) 8 | 9 | suppressWarnings(testSimple("classif.logreg", binaryclass.df, binaryclass.target, binaryclass.train.inds, p.class)) 10 | 11 | 12 | suppressWarnings(testProb("classif.logreg", binaryclass.df, binaryclass.target, binaryclass.train.inds, p.prob)) 13 | 14 | tt = function(formula, data) { 15 | glm(formula, data = data, family = binomial) 16 | } 17 | tp = function(model, newdata) { 18 | p = predict(model, newdata, type = "response") 19 | as.factor(binaryclass.class.levs[ifelse(p > 0.5, 2, 1)]) 20 | } 21 | 22 | suppressWarnings(testCV("classif.logreg", binaryclass.df, binaryclass.target, tune.train = tt, tune.predict = tp)) 23 | }) 24 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_naiveBayes.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_naiveBayes", { 3 | requirePackagesOrSkip("e1071", default.method = "load") 4 | 5 | m = e1071::naiveBayes(formula = multiclass.formula, data = multiclass.train) 6 | p = predict(m, newdata = multiclass.test[, -multiclass.class.col]) 7 | p2 = predict(m, newdata = multiclass.test[, -multiclass.class.col], 8 | type = "raw") 9 | 10 | testSimple("classif.naiveBayes", multiclass.df, multiclass.target, 11 | multiclass.train.inds, p) 12 | testProb("classif.naiveBayes", multiclass.df, multiclass.target, 13 | multiclass.train.inds, p2) 14 | 15 | tt = e1071::naiveBayes 16 | tp = function(model, newdata) predict(model, newdata[, -multiclass.class.col]) 17 | 18 | testCV("classif.naiveBayes", multiclass.df, multiclass.target, 19 | tune.train = tt, tune.predict = tp) 20 | }) 21 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_qda.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_qda", { 3 | requirePackagesOrSkip("MASS", default.method = "load") 4 | m = try(MASS::qda(formula = multiclass.formula, data = multiclass.train)) 5 | if (class(m) != "try-error") { 6 | p = predict(m, newdata = multiclass.test) 7 | } else { 8 | p = m 9 | } 10 | 11 | testSimple("classif.qda", multiclass.df, multiclass.target, 12 | multiclass.train.inds, p$class) 13 | testProb("classif.qda", multiclass.df, multiclass.target, 14 | multiclass.train.inds, p$posterior) 15 | 16 | tt = MASS::qda 17 | tp = function(model, newdata) predict(model, newdata)$class 18 | 19 | testCV("classif.qda", multiclass.df, multiclass.target, tune.train = tt, 20 | tune.predict = tp) 21 | }) 22 | 
-------------------------------------------------------------------------------- /tests/testthat/test_classif_rFerns.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_rFerns", { 3 | requirePackagesOrSkip("rFerns", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(ferns = 200L, depth = 4L) 8 | ) 9 | 10 | old.predicts.list = list() 11 | 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | parset = c(list(formula = binaryclass.formula, data = binaryclass.train), 15 | parset) 16 | set.seed(getOption("mlr.debug.seed")) 17 | m = do.call(rFerns::rFerns, parset) 18 | old.predicts.list[[i]] = factor(predict(m, binaryclass.test)) 19 | } 20 | 21 | testSimpleParsets("classif.rFerns", binaryclass.df, binaryclass.target, 22 | binaryclass.train.inds, 23 | old.predicts.list, parset.list) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_ranger.R: -------------------------------------------------------------------------------- 1 | 2 | ## FIXME: Add "response" test if R seed is respected in ranger::predict() 3 | test_that("classif_ranger", { 4 | requirePackagesOrSkip("ranger", default.method = "load") 5 | 6 | parset.list = list( 7 | list(num.trees = 20), 8 | list(num.trees = 20, mtry = 4), 9 | list(num.trees = 20, min.node.size = 2) 10 | ) 11 | old.probs.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | parset = c(parset, list(data = binaryclass.train, 16 | formula = binaryclass.formula, write.forest = TRUE, probability = TRUE, 17 | respect.unordered.factors = TRUE)) 18 | set.seed(getOption("mlr.debug.seed")) 19 | m = do.call(ranger::ranger, parset) 20 | p = predict(m, data = binaryclass.test) 21 | old.probs.list[[i]] = p$predictions[, 1] 22 | } 23 | 24 | testProbParsets("classif.ranger", binaryclass.df, binaryclass.target, 25 | binaryclass.train.inds, old.probs.list, parset.list) 26 | }) 27 | -------------------------------------------------------------------------------- /tests/testthat/test_classif_sda.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_sda", { 3 | requirePackagesOrSkip("sda", default.method = "load") 4 | 5 | capture.output({ 6 | m = sda::sda(as.matrix(dropNamed(multiclass.train, multiclass.target)), 7 | multiclass.train[, multiclass.target]) 8 | p = sda::predict.sda(m, as.matrix(dropNamed(multiclass.test, 9 | multiclass.target))) 10 | }) 11 | 12 | testSimple("classif.sda", multiclass.df, multiclass.target, 13 | multiclass.train.inds, p$class) 14 | testProb("classif.sda", multiclass.df, multiclass.target, 15 | multiclass.train.inds, p$posterior) 16 | }) 17 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_Cobweb.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_Cobweb", { 3 | requirePackagesOrSkip("RWeka", default.method = "load") 4 | 5 | parset.list = list( 6 | list() 7 | ) 8 | 9 | old.predicts.list = list() 10 | 11 | for (i in seq_along(parset.list)) { 12 | parset = parset.list[[i]] 13 | ctrl = do.call(RWeka::Weka_control, parset) 14 | m = RWeka::Cobweb(noclass.train, control = ctrl) 15 | p = predict(m, noclass.test) + 1 16 | old.predicts.list[[i]] = p 17 | } 18 | 19 | testSimpleParsets("cluster.Cobweb", noclass.df, character(0L), 20 | noclass.train.inds, old.predicts.list, 
parset.list) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_EM.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_EM", { 3 | requirePackagesOrSkip("RWeka", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(N = 10) 8 | ) 9 | 10 | old.predicts.list = list() 11 | 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | ctrl = do.call(RWeka::Weka_control, parset) 15 | m = RWeka::make_Weka_clusterer("weka/clusterers/EM")(noclass.train, 16 | control = ctrl) 17 | p = predict(m, noclass.test) + 1 18 | old.predicts.list[[i]] = p 19 | } 20 | 21 | testSimpleParsets("cluster.EM", noclass.df, character(0L), 22 | noclass.train.inds, old.predicts.list, parset.list) 23 | }) 24 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_FarthestFirst.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_FarthestFirst", { 3 | requirePackagesOrSkip("RWeka", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(N = 3) 8 | ) 9 | 10 | old.predicts.list = list() 11 | 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | ctrl = do.call(RWeka::Weka_control, parset) 15 | m = RWeka::FarthestFirst(noclass.train, control = ctrl) 16 | p = predict(m, noclass.test) + 1 17 | old.predicts.list[[i]] = p 18 | } 19 | 20 | testSimpleParsets("cluster.FarthestFirst", noclass.df, character(0L), 21 | noclass.train.inds, old.predicts.list, parset.list) 22 | }) 23 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_SimpleKMeans.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_SimpleKMeans", { 3 | requirePackagesOrSkip("RWeka", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(N = 5L) 8 | ) 9 | 10 | old.predicts.list = list() 11 | 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | ctrl = do.call(RWeka::Weka_control, parset) 15 | m = RWeka::SimpleKMeans(noclass.train, control = ctrl) 16 | p = predict(m, noclass.test) + 1L 17 | old.predicts.list[[i]] = p 18 | } 19 | 20 | testSimpleParsets("cluster.SimpleKMeans", noclass.df, character(0L), 21 | noclass.train.inds, old.predicts.list, parset.list) 22 | }) 23 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_XMeans.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_XMeans", { 3 | skip_on_os("windows") 4 | skip_on_cran() 5 | 6 | requirePackagesOrSkip("RWeka", default.method = "load") 7 | RWeka::WPM("refresh-cache") 8 | RWeka::WPM("install-package", "XMeans") 9 | 10 | parset.list = list( 11 | list(), 12 | list(I = 1) 13 | ) 14 | 15 | old.predicts.list = list() 16 | 17 | for (i in seq_along(parset.list)) { 18 | parset = parset.list[[i]] 19 | ctrl = do.call(RWeka::Weka_control, parset) 20 | m = RWeka::XMeans(noclass.train, control = ctrl) 21 | p = predict(m, noclass.test) + 1 22 | old.predicts.list[[i]] = p 23 | } 24 | 25 | testSimpleParsets("cluster.XMeans", noclass.df, character(0L), 26 | noclass.train.inds, old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /tests/testthat/test_cluster_dbscan.R: 
-------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_dbscan", { 3 | requirePackagesOrSkip("fpc", default.method = "load") 4 | 5 | parset.list = list( 6 | list() 7 | ) 8 | old.predicts.list = list() 9 | 10 | for (i in seq_along(parset.list)) { 11 | parset = parset.list[[i]] 12 | m = fpc::dbscan(noclass.train, eps = 1) 13 | p = as.integer(predict(m, noclass.train, newdata = noclass.test)) 14 | p[p == 0] = NA 15 | old.predicts.list[[i]] = p 16 | } 17 | 18 | testSimpleParsets("cluster.dbscan", noclass.df, character(0L), 19 | noclass.train.inds, old.predicts.list, parset.list) 20 | }) 21 | -------------------------------------------------------------------------------- /tests/testthat/test_featsel_analyzeFeatSelResult.R: -------------------------------------------------------------------------------- 1 | 2 | cat("analyzeFeatSelResult") 3 | test_that("analyzeFeatSelResult", { 4 | lrn = makeLearner("classif.rpart") 5 | rdesc = makeResampleDesc("Holdout") 6 | ctrl.seq = makeFeatSelControlSequential(method = "sfs") 7 | sf.seq = selectFeatures(learner = lrn, task = multiclass.task, 8 | resampling = rdesc, control = ctrl.seq, show.info = FALSE) 9 | expect_output(analyzeFeatSelResult(sf.seq, reduce = TRUE), "Petal.Width") 10 | }) 11 | 12 | test_that("analyzeFeatSelResult with tune threshold (cf. issue #245)", { 13 | ctrl = makeFeatSelControlSequential(method = "sfs", alpha = 0.1, 14 | tune.threshold = TRUE) 15 | rdesc = makeResampleDesc("Holdout") 16 | lrn = makeLearner("classif.lda", predict.type = "prob") 17 | task = subsetTask(sonar.task, features = paste("V", 11:16, sep = "")) 18 | sfeats = selectFeatures(learner = lrn, task = task, resampling = rdesc, 19 | control = ctrl, show.info = FALSE) 20 | expect_output(analyzeFeatSelResult(sfeats, reduce = TRUE), "V11") 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_featsel_rankSimpleFilters.R: -------------------------------------------------------------------------------- 1 | 2 | cat("rankSimpleFilters") 3 | test_that("base filters of ensemble filters are ranked correctly", { 4 | requirePackagesOrSkip("Hmisc", default.method = "load") 5 | 6 | filters.ranked = rankBaseFilters(task.filters.rank, 7 | method = c("univariate.model.score", "variance"), 8 | nselect = 9, more.args = list()) 9 | 10 | # split into groups to check the ordering of vars "value" and "rank" for each 11 | data.split = split(filters.ranked, filters.ranked$filter) 12 | 13 | foo = lapply(data.split, function(x) { 14 | expect_false(is.unsorted(x[["value"]])) 15 | expect_false(is.unsorted(x[["rank"]])) 16 | }) 17 | 18 | # check that the highest value has also the highest rank 19 | expect_true(which.max(data.split[[1]]$value) == which.max(data.split[[1]]$rank)) 20 | expect_true(which.max(data.split[[2]]$value) == which.max(data.split[[2]]$rank)) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_featsel_selectFeaturesSequential.R: -------------------------------------------------------------------------------- 1 | 2 | cat("selectFeaturesSequential") 3 | # This used to cause an error. Now ensure it does not. 
4 | test_that("no crash with sffs", { 5 | p = mlbench::mlbench.waveform(1000) 6 | dataset = as.data.frame(p) 7 | dataset = droplevels(subset(dataset, classes != 3)) 8 | 9 | m.ct = makeClassifTask(data = dataset, target = "classes") 10 | ctrl = makeFeatSelControlSequential(method = "sffs", maxit = NA, 11 | alpha = 0.001) 12 | m.l = makeLearner("classif.logreg", predict.type = "prob") 13 | inner = makeResampleDesc("Holdout", stratify = TRUE) 14 | lrn = makeFeatSelWrapper(m.l, resampling = inner, control = ctrl) 15 | outer = makeResampleDesc("CV", iters = 2, stratify = TRUE) 16 | # No error occurs 17 | expect_error(resample(lrn, m.ct, outer, extract = getFeatSelResult, 18 | measures = list(mlr::auc, mlr::acc, mlr::brier), models = TRUE), NA) 19 | }) 20 | -------------------------------------------------------------------------------- /tests/testthat/test_lint.R: -------------------------------------------------------------------------------- 1 | # run only on 2 | # - PR 3 | # - R-release 4 | # - Travis 5 | if (Sys.getenv("TRAVIS") == "true" && Sys.getenv("TRAVIS_PULL_REQUEST") != "false" && Sys.getenv("TRAVIS_R_VERSION_STRING") == "release") { 6 | test_that("lint check", { 7 | library("lintr") 8 | library("rex") 9 | # linters are defined in help_lint.R 10 | expect_lint_free(path = Sys.getenv("TRAVIS_BUILD_DIR"), linters = linters) 11 | }) 12 | } 13 | -------------------------------------------------------------------------------- /tests/testthat/test_multilabel_cforest.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("multilabel_cforest", { 3 | requirePackagesOrSkip("party", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(control = party::cforest_unbiased(mtry = 2)), 8 | list(control = party::cforest_unbiased(ntree = 200)) 9 | ) 10 | parset.list2 = list( 11 | list(), 12 | list(mtry = 2), 13 | list(ntree = 200) 14 | ) 15 | 16 | old.probs.list = list() 17 | 18 | for (i in seq_along(parset.list)) { 19 | parset = parset.list[[i]] 20 | pars = list(multilabel.formula, data = multilabel.train) 21 | pars = c(pars, parset) 22 | set.seed(getOption("mlr.debug.seed")) 23 | m = do.call(party::cforest, pars) 24 | # multivariate cforest can only predict probs 25 | p = predict(m, newdata = multilabel.test) 26 | p2 = do.call(rbind, p) 27 | old.probs.list[[i]] = data.frame(p2) 28 | } 29 | 30 | testProbParsets("multilabel.cforest", multilabel.df, multilabel.target, 31 | multilabel.train.inds, old.probs.list, parset.list2) 32 | }) 33 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_GPfit.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_GPfit", { 3 | testFun = function(x) { 4 | return(4 * x[, 1]^2 - 2 * x[, 2]) 5 | } 6 | train.inds = 1:20 7 | s = seq(0, 1, by = 0.2) 8 | x = expand.grid(s, s) 9 | y = testFun(x) 10 | gpfit.test.df = cbind.data.frame(x, y) 11 | colnames(gpfit.test.df) = c("x1", "x2", "y") 12 | m = GPfit::GP_fit(x[train.inds, ], y[train.inds]) 13 | p = predict(m, xnew = x[-train.inds, ]) 14 | testSimple("regr.GPfit", gpfit.test.df, "y", train.inds, p$Y_hat) 15 | }) 16 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_IBk.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_IBk", { 3 | requirePackagesOrSkip("RWeka", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(K = 2) 
8 | ) 9 | 10 | old.predicts.list = list() 11 | 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | ctrl = do.call(RWeka::Weka_control, parset) 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | m = RWeka::IBk(regr.formula, regr.train, control = ctrl) 18 | p = predict(m, newdata = regr.test) 19 | old.predicts.list[[i]] = p 20 | } 21 | 22 | testSimpleParsets("regr.IBk", regr.df, regr.target, regr.train.inds, 23 | old.predicts.list, parset.list) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_RRF.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_RRF", { 3 | requirePackages("RRF", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(ntree = 5, mtry = 2), 8 | list(ntree = 5, mtry = 4) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(formula = regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | set.seed(getOption("mlr.debug.seed")) 18 | m = do.call(RRF::RRF, pars) 19 | p = predict(m, newdata = regr.test, type = "response") 20 | old.predicts.list[[i]] = p 21 | } 22 | 23 | testSimpleParsets("regr.RRF", regr.df, regr.target, regr.train.inds, 24 | old.predicts.list, parset.list) 25 | }) 26 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_bgp.R: -------------------------------------------------------------------------------- 1 | test_that("regr_bgp", { 2 | skip("not runnable in parallel") 3 | # due to https://github.com/cran/tgp/blob/689168f5e43941e2808c36bc43603329641028db/R/tgp.postprocess.R#L75 # nocov 4 | requirePackagesOrSkip("tgp", default.method = "load") 5 | 6 | parset.list = list( 7 | list(meanfn = "linear", bprior = "bflat", corr = "expsep") 8 | ) 9 | inds = 1:50 10 | y = regr.num.df[inds, regr.num.target] 11 | old.predicts.list = list() 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | pars = list( 15 | X = regr.num.df[inds, -regr.num.class.col], Z = y, verb = 0, 16 | pred.n = FALSE) 17 | pars = c(pars, parset) 18 | set.seed(getOption("mlr.debug.seed")) 19 | m = do.call(tgp::bgp, pars) 20 | old.predicts.list[[i]] = predict(m, 21 | XX = regr.num.df[-inds, -regr.num.class.col], pred.n = FALSE)$ZZ.km 22 | } 23 | testSimpleParsets( 24 | "regr.bgp", regr.num.df, regr.num.target, inds, 25 | old.predicts.list, parset.list) 26 | }) 27 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_blm.R: -------------------------------------------------------------------------------- 1 | test_that("regr_blm", { 2 | skip("not runnable in parallel") 3 | # due to https://github.com/cran/tgp/blob/689168f5e43941e2808c36bc43603329641028db/R/tgp.postprocess.R#L75 # nocov 4 | requirePackagesOrSkip("tgp", default.method = "load") 5 | 6 | parset.list = list( 7 | list(meanfn = "linear", bprior = "bflat"), 8 | list(meanfn = "linear", bprior = "bmle"), 9 | list(meanfn = "constant") 10 | ) 11 | y = regr.num.train[, regr.num.target] 12 | old.predicts.list = list() 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list( 16 | X = regr.num.train[, -regr.num.class.col], Z = y, verb = 0, 17 | pred.n = FALSE) 18 | pars = c(pars, parset) 19 | m = do.call(tgp::blm, pars) 20 | 21 | old.predicts.list[[i]] = predict(m, 22 | XX = regr.num.test[, 
-regr.num.class.col], pred.n = FALSE)$ZZ.km 23 | } 24 | testSimpleParsets( 25 | "regr.blm", regr.num.df, regr.num.target, 26 | regr.num.train.inds, old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_brnn.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_brnn", { 3 | requirePackagesOrSkip("brnn", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(neurons = 3L), 8 | list(mu = 0.001) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | pars = list(formula = regr.formula, data = regr.train) 15 | pars = c(pars, parset.list[[i]]) 16 | set.seed(getOption("mlr.debug.seed")) 17 | capture.output({ 18 | m = do.call(brnn::brnn, pars) 19 | }) 20 | p = predict(m, newdata = regr.test) 21 | old.predicts.list[[i]] = p 22 | } 23 | 24 | testSimpleParsets("regr.brnn", regr.df, regr.target, regr.train.inds, 25 | old.predicts.list, parset.list) 26 | }) 27 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_crs.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_crs", { 3 | requirePackagesOrSkip("crs", default.method = "load") 4 | 5 | parset.list = list( 6 | list(nmulti = 1, cv = "none"), 7 | list(degree = rep(3, 12), nmulti = 1, cv = "none"), 8 | list(segments = rep(3, 12), nmulti = 1, cv = "none") 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | suppressWarnings({ 18 | m = do.call(crs::crs, pars) 19 | }) 20 | # suppressed warnings: 21 | # some 'x' values beyond boundary knots may cause ill-conditioned bases 22 | # NaNs produced 23 | pred = suppressWarnings(predict(m, newdata = regr.test)) 24 | attr(pred, "lwr") = NULL 25 | attr(pred, "upr") = NULL 26 | old.predicts.list[[i]] = pred 27 | } 28 | 29 | suppressWarnings(testSimpleParsets("regr.crs", regr.df, regr.target, 30 | regr.train.inds, old.predicts.list, parset.list)) 31 | }) 32 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_ctree.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_ctree", { 3 | requirePackagesOrSkip("party", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(minsplit = 10, mincriterion = 0.005), 8 | list(minsplit = 50, mincriterion = 0.05), 9 | list(minsplit = 50, mincriterion = 0.999), 10 | list(minsplit = 1, mincriterion = 0.0005) 11 | ) 12 | 13 | old.predicts.list = list() 14 | 15 | for (i in seq_along(parset.list)) { 16 | parset = parset.list[[i]] 17 | ctrl = do.call(party::ctree_control, parset) 18 | m = party::ctree(formula = regr.formula, data = regr.train, controls = ctrl) 19 | p = predict(m, newdata = regr.test, type = "response")[, 1L] 20 | old.predicts.list[[i]] = p 21 | } 22 | 23 | testSimpleParsets("regr.ctree", regr.df, regr.target, regr.train.inds, 24 | old.predicts.list, parset.list) 25 | }) 26 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_cubist.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_cubist", { 3 | requirePackagesOrSkip("Cubist", default.method = "load") 4 | 5 | parset.list1 
= list( 6 | list(), 7 | list(committees = 2L), 8 | list(control = Cubist::cubistControl(extrapolation = 50L, rules = 50L)) 9 | ) 10 | parset.list2 = list( 11 | list(), 12 | list(committees = 2L), 13 | list(extrapolation = 50, rules = 50L) 14 | ) 15 | 16 | old.predicts.list = list() 17 | X = regr.train[, setdiff(names(regr.train), regr.target)] 18 | y = regr.train[, regr.target] 19 | 20 | for (i in seq_along(parset.list1)) { 21 | parset = parset.list1[[i]] 22 | parset = c(list(x = X, y = y), parset) 23 | m = do.call(Cubist::cubist, parset) 24 | p = predict(m, newdata = regr.test) 25 | old.predicts.list[[i]] = p 26 | } 27 | 28 | testSimpleParsets("regr.cubist", regr.df, regr.target, regr.train.inds, 29 | old.predicts.list, parset.list2) 30 | }) 31 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_earth.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_earth", { 3 | requirePackagesOrSkip("earth", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(degree = 2), 8 | list(penalty = 4) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | m = do.call(earth::earth, pars) 18 | old.predicts.list[[i]] = predict(m, newdata = regr.test)[, 1] 19 | } 20 | 21 | testSimpleParsets("regr.earth", regr.df, regr.target, regr.train.inds, 22 | old.predicts.list, parset.list) 23 | }) 24 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_evtree.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_evtree", { 3 | requirePackagesOrSkip("evtree", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(maxdepth = 2), 8 | list(ntrees = 200) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | m = do.call(evtree::evtree, pars) 18 | old.predicts.list[[i]] = as.vector(predict(m, newdata = regr.test)) 19 | } 20 | 21 | testSimpleParsets("regr.evtree", regr.df, regr.target, regr.train.inds, 22 | old.predicts.list, parset.list) 23 | }) 24 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_featureless.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_featureless", { 3 | df = data.frame( 4 | y = c(1, 2, 3, 3, 3), 5 | x = rep(1, 5) 6 | ) 7 | method = c("mean", "median") 8 | task = makeRegrTask(data = df, target = "y") 9 | 10 | # compute predictions manually 11 | expected.response = list( 12 | median = 3, 13 | mean = (1 + 2 + 3 + 3 + 3) / 5 14 | ) 15 | 16 | for (m in method) { 17 | lrn = makeLearner("regr.featureless", method = m) 18 | mod = train(lrn, task) 19 | # test content of learner model 20 | expect_equal(getLearnerModel(mod)$response, expected.response[[m]]) 21 | # test prediction works properly 22 | n = 10 23 | test = data.frame(rep(1, n)) 24 | p = predict(mod, newdata = test) 25 | expect_equal(getPredictionResponse(p), rep(expected.response[[m]], n)) 26 | # test that printer works correctly 27 | expect_output(print(lrn), "featureless") 28 | expect_output(print(lrn), m) 29 | } 30 | }) 31 | 
-------------------------------------------------------------------------------- /tests/testthat/test_regr_gausspr.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_gausspr", { 3 | requirePackages("kernlab", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(kernel = "splinedot"), 8 | list(var = 0.02) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | set.seed(getOption("mlr.debug.seed")) 18 | capture.output({ 19 | m = do.call(kernlab::gausspr, pars) 20 | }) 21 | p = kernlab::predict(m, newdata = regr.test) 22 | old.predicts.list[[i]] = p[, 1] 23 | } 24 | testSimpleParsets("regr.gausspr", regr.df, regr.target, regr.train.inds, 25 | old.predicts.list, parset.list) 26 | }) 27 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_h2oglm.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_h2oglm", { 3 | skip_on_ci() 4 | requirePackages("h2o", default.method = "load") 5 | foo = capture.output(h2o::h2o.init()) 6 | 7 | parset.list = list( 8 | list(), 9 | list(alpha = 1), 10 | list(alpha = 1, lambda = 0.2) 11 | ) 12 | old.predicts.list = list() 13 | 14 | for (i in seq_along(parset.list)) { 15 | parset = parset.list[[i]] 16 | parset = c(parset, list(x = colnames(regr.train[, -regr.class.col]), 17 | y = regr.target, 18 | training_frame = h2o::as.h2o(regr.train))) 19 | set.seed(getOption("mlr.debug.seed")) 20 | m = do.call(h2o::h2o.glm, parset) 21 | p = predict(m, newdata = h2o::as.h2o(regr.test)) 22 | old.predicts.list[[i]] = as.data.frame(p)[, 1L] 23 | } 24 | 25 | testSimpleParsets("regr.h2o.glm", regr.df, regr.target, regr.train.inds, 26 | old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_ksvm.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_ksvm", { 3 | requirePackagesOrSkip("kernlab", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(C = 0.3, kpar = list(sigma = 2)), 8 | list(C = 0.3, kpar = list(sigma = 2), epsilon = 0.3) 9 | ) 10 | parset.list2 = list( 11 | list(), 12 | list(C = 0.3, sigma = 2), 13 | list(C = 0.3, sigma = 2, epsilon = 0.3) 14 | ) 15 | 16 | old.predicts.list = list() 17 | 18 | for (i in seq_along(parset.list)) { 19 | parset = parset.list[[i]] 20 | pars = list(regr.formula, data = regr.train) 21 | pars = c(pars, parset) 22 | set.seed(getOption("mlr.debug.seed")) 23 | capture.output({ 24 | m = do.call(kernlab::ksvm, pars) 25 | }) 26 | p = kernlab::predict(m, newdata = regr.test) 27 | old.predicts.list[[i]] = p[, 1] 28 | } 29 | testSimpleParsets("regr.ksvm", regr.df, regr.target, regr.train.inds, 30 | old.predicts.list, parset.list2) 31 | }) 32 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_laGP.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_laGP", { 3 | requirePackagesOrSkip("laGP", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(start = 6, end = 49, close = 50) 8 | ) 9 | dd = regr.num.df[1:100, ] 10 | old.predicts.list = list() 11 | des1 = dd[1:51, setdiff(colnames(dd), regr.num.target)] 12 | des2 = dd[52:100, 
setdiff(colnames(dd), regr.num.target)] 13 | y = dd[1:51, regr.num.target] 14 | for (i in seq_along(parset.list)) { 15 | parset = parset.list[[i]] 16 | pars = list(X = des1[, -regr.num.class.col], Z = y, 17 | XX = des2[, -regr.num.class.col], verb = 0, 18 | Xi.ret = FALSE) 19 | pars = c(pars, parset) 20 | old.predicts.list[[i]] = do.call(laGP::aGP, pars)$mean 21 | } 22 | testSimpleParsets("regr.laGP", dd, regr.num.target, 1:51, old.predicts.list, 23 | parset.list) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_lm.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_lm", { 3 | pars = list(regr.formula, data = regr.train) 4 | m = do.call(lm, pars) 5 | p = predict(m, newdata = regr.test) 6 | 7 | testSimple("regr.lm", regr.df, regr.target, regr.train.inds, p) 8 | }) 9 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_plsr.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_plsr", { 3 | requirePackagesOrSkip("pls", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(ncomp = 1), 8 | list(ncomp = 3, method = "simpls") 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(regr.formula, data = regr.train) 16 | pars = c(pars, parset) 17 | m = do.call(pls::plsr, pars) 18 | old.predicts.list[[i]] = pls:::predict.mvr(m, newdata = regr.test, 19 | comps = 1:m$ncomp)[, 1] 20 | } 21 | 22 | testSimpleParsets("regr.plsr", regr.df, regr.target, regr.train.inds, 23 | old.predicts.list, parset.list) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test_regr_rsm.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_rsm", { 3 | requirePackagesOrSkip("rsm", default.method = "load") 4 | 5 | data = regr.df[, c("b", "lstat", "medv")] 6 | pars = list(medv ~ FO(b, lstat), data = data[regr.train.inds, ]) 7 | m = do.call(rsm::rsm, pars) 8 | p = predict(m, newdata = regr.test) 9 | 10 | testSimple("regr.rsm", data, regr.target, regr.train.inds, p) 11 | }) 12 | -------------------------------------------------------------------------------- /tests/testthat/test_surv_coxph.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("surv_coxph", { 3 | requirePackagesOrSkip("survival", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(iter.max = 1), 8 | list(iter.max = 10), 9 | list(iter.max = 50) 10 | ) 11 | 12 | old.predicts.list = list() 13 | 14 | for (i in seq_along(parset.list)) { 15 | parset = parset.list[[i]] 16 | pars = list(formula = surv.formula, data = surv.train) 17 | pars = c(pars, parset) 18 | m = do.call(survival::coxph, pars) 19 | p = predict(m, newdata = surv.test, type = "lp") 20 | old.predicts.list[[i]] = p 21 | } 22 | 23 | testSimpleParsets("surv.coxph", surv.df, surv.target, surv.train.inds, 24 | old.predicts.list, parset.list) 25 | }) 26 | -------------------------------------------------------------------------------- /tests/testthat/test_surv_gbm.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("surv_gbm", { 3 | requirePackagesOrSkip("gbm", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(n.trees = 10L), 8 
| list(interaction.depth = 2L, n.trees = 10L) 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | pars = list(surv.formula, data = surv.train, distribution = "coxph") 16 | pars = c(pars, parset) 17 | set.seed(getOption("mlr.debug.seed")) 18 | capture.output({ 19 | m = do.call(gbm::gbm, pars) 20 | }) 21 | p = gbm::predict.gbm(m, newdata = surv.test, n.trees = m$n.trees) 22 | old.predicts.list[[i]] = p 23 | } 24 | 25 | testSimpleParsets("surv.gbm", surv.df, surv.target, surv.train.inds, 26 | old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /tests/testthat/test_surv_rpart.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("surv_rpart", { 3 | requirePackagesOrSkip("rpart", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(minsplit = 10, cp = 0.005), 8 | list(minsplit = 50, cp = 0.05), 9 | list(minsplit = 50, cp = 0.999), 10 | list(minsplit = 1, cp = 0.0005) 11 | ) 12 | 13 | old.predicts.list = list() 14 | old.probs.list = list() 15 | 16 | for (i in seq_along(parset.list)) { 17 | parset = parset.list[[i]] 18 | pars = list(formula = surv.formula, data = surv.train) 19 | pars = c(pars, parset) 20 | m = do.call(rpart::rpart, pars) 21 | p = predict(m, newdata = surv.test) 22 | old.predicts.list[[i]] = p 23 | } 24 | 25 | testSimpleParsets("surv.rpart", surv.df, surv.target, surv.train.inds, 26 | old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /tests/testthat/test_tune_getTuneResultOptPath.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("getTuneResultOptPath", { 3 | ctrl = makeTuneControlRandom(maxit = 3L) 4 | rdesc = makeResampleDesc("CV", iters = 3L) 5 | ps = makeParamSet( 6 | makeDiscreteParam("C", values = seq(1:10))) 7 | rdesc = makeResampleDesc("CV", iters = 3L) 8 | res = tuneParams("classif.ksvm", task = iris.task, resampling = rdesc, 9 | par.set = ps, control = ctrl) 10 | 11 | expect_equal(res$opt.path, getTuneResultOptPath(res, as.df = FALSE)) 12 | expect_equal(as.data.frame(res$opt.path), getTuneResultOptPath(res)) 13 | }) 14 | -------------------------------------------------------------------------------- /thirdparty/XMeans1.0.4.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/thirdparty/XMeans1.0.4.zip -------------------------------------------------------------------------------- /thirdparty/gen_families.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo '\name{mlrFamilies}' 3 | echo '\alias{mlrFamilies}' 4 | echo '\title{mlr documentation families}' 5 | echo '\description{List of all mlr documentation families with members.}' 6 | echo '\arguments{' 7 | awk 'BEGIN { fam = 0 } /@family/ { fam = $3 } /^[a-z]/ { if(fam && $1 != "NULL") { fams[fam] = fams[fam] $1 ", "; fam = 0 } } END { for(fam in fams) { sub(/, $/, "", fams[fam]); print "\\item{" fam "}{" fams[fam] "}" } }' R/* | sort 8 | echo '}' 9 | -------------------------------------------------------------------------------- /tic.R: -------------------------------------------------------------------------------- 1 | if (Sys.info()[["sysname"]] != "Windows") { 2 | get_stage("script") %>% 3 | 
add_code_step(RWeka::WPM("refresh-cache")) %>% 4 | add_code_step(RWeka::WPM("install-package", "XMeans")) 5 | } 6 | 7 | # R CMD Check 8 | do_package_checks(error_on = "warning", codecov = FALSE) 9 | 10 | # pkgdown 11 | if (ci_on_ghactions() && ci_has_env("BUILD_PKGDOWN")) { 12 | get_stage("before_deploy") %>% 13 | add_step(step_install_github("mlr-org/mlr3pkgdowntemplate")) %>% 14 | add_step(step_install_cran("GGally")) 15 | do_pkgdown() 16 | } 17 | 18 | if (ci_is_env("CODECOV", "true")) { 19 | get_stage("after_success") %>% 20 | add_code_step(RWeka::WPM("refresh-cache")) %>% 21 | add_code_step(RWeka::WPM("install-package", "XMeans")) %>% 22 | add_code_step(covr::codecov()) 23 | } 24 | -------------------------------------------------------------------------------- /todo-files/RLearner_classif_llr.R: -------------------------------------------------------------------------------- 1 | makeRLearner.classif.llr = function() { 2 | makeRLearnerClassif( 3 | cl = "classif.llr", 4 | package = "locCLass", 5 | oneclass = FALSE, 6 | twoclass = TRUE, 7 | multiclass = FALSE, 8 | missings = FALSE, 9 | numerics = TRUE, 10 | factors = TRUE, 11 | prob = TRUE, 12 | weights = FALSE 13 | ) 14 | } 15 | 16 | trainLearner.classif.llr = function(.learner, .task, .subset, ...) { 17 | f = getTaskFormula(.task) 18 | llr(f, data = getTaskData(.task, .subset), ...) 19 | } 20 | 21 | predictLearner.classif.llr = function(.learner, .model, .newdata, ...) { 22 | p = predict(.model$learner.model, newdata = .newdata, ...) 23 | if (.learner$predict.type == "response") { 24 | return(p$class) 25 | } else { 26 | return(p$posterior) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /todo-files/RLearner_regr_bagEarth.R: -------------------------------------------------------------------------------- 1 | makeRLearner.regr.bagEarth = function() { 2 | makeRLearnerRegr( 3 | cl = "regr.bagEarth", 4 | package = "caret", 5 | missings = FALSE, 6 | numerics = TRUE, 7 | factors = TRUE, 8 | se = FALSE, 9 | weights = FALSE 10 | ) 11 | } 12 | 13 | trainLearner.regr.bagEarth = function(.learner, .task, .subset, ...) { 14 | f = getTaskFormula(.task) 15 | d = getTaskData(.task, .subset) 16 | if (.task$task.desc$has.weights) { 17 | f = getTaskFormula(.task) 18 | bagEarth(f, data = getTaskData(.task, .subset), ...) 19 | } else { 20 | bagEarth(f, data = d, ...) 21 | } 22 | } 23 | 24 | predictLearner.regr.bagEarth = function(.learner, .model, .newdata, ...) { 25 | predict.bagEarth(.model$learner.model, newdata = .newdata) 26 | } 27 | -------------------------------------------------------------------------------- /todo-files/RLearner_regr_icr.R: -------------------------------------------------------------------------------- 1 | makeRLearner.regr.icr = function() { 2 | makeRLearnerRegr( 3 | cl = "regr.icr", 4 | package = "stats", 5 | missings = FALSE, 6 | numerics = TRUE, 7 | factors = TRUE, 8 | se = FALSE, 9 | weights = FALSE 10 | ) 11 | } 12 | 13 | trainLearner.regr.icr = function(.learner, .task, .subset, ...) { 14 | f = getTaskFormula(.task) 15 | icr(f, data = getTaskData(.task, .subset), ...) 16 | } 17 | 18 | predictLearner.regr.icr = function(.learner, .model, .newdata, ...) 
{ 19 | predict(.model$learner.model, newdata = .newdata) 20 | } 21 | -------------------------------------------------------------------------------- /todo-files/StackResults/classifBaseLearner.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/classifBaseLearner.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/classifStack_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/classifStack_acc.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/classifStack_auc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/classifStack_auc.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/multiclassifBaseLearner.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/multiclassifBaseLearner.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/multiclassifStack_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/multiclassifStack_acc.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/multiclassifStack_auc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/multiclassifStack_auc.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/regrBaseLearner.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/regrBaseLearner.pdf -------------------------------------------------------------------------------- /todo-files/StackResults/regrStack.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/todo-files/StackResults/regrStack.pdf -------------------------------------------------------------------------------- /todo-files/TuneControlOptim.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | #' @rdname TuneControl 3 | makeTuneControlOptim = function(same.resampling.instance = TRUE, start, ...) 
{ 4 | makeTuneControl(same.resampling.instance = same.resampling.instance, 5 | start = start, ..., cl = "TuneControlOptim") 6 | } 7 | -------------------------------------------------------------------------------- /todo-files/makeTaskDescFromData.R: -------------------------------------------------------------------------------- 1 | makeTaskDescFromChangedData = function(task, data) { 2 | td = task$task.desc 3 | i = which(colnames(data) %in% td$target) 4 | td$size = nrow(data) 5 | y = data[, td$target] 6 | td$n.feat = c( 7 | numerics = sum(sapply(data, is.numeric)) - is.numeric(y), 8 | factors = sum(sapply(data, is.factor)) - is.factor(y) 9 | ) 10 | if (td$type == "classif") { 11 | td$class.levels = levels(y) 12 | } else { 13 | td$class.levels = as.character(NA) 14 | } 15 | td$has.missings = any(sapply(data, function(x) any(is.na(x)))) 16 | return(td) 17 | } 18 | -------------------------------------------------------------------------------- /todo-files/measures_ks.R: -------------------------------------------------------------------------------- 1 | #' @export ks 2 | #' @rdname measures 3 | #' @usage none 4 | #' @format none 5 | ks = makeMeasure(id = "ks", minimize = FALSE, classif = TRUE, only.binary = TRUE, allowed.pred.types = "prob", 6 | fun = function(task, model, pred, extra.args) { 7 | pos = pred$task.desc$positive 8 | idx1 = pred$data$truth == pos 9 | col1 = paste0("prob.", pos) 10 | x = pred$data[idx1, col1] 11 | y = pred$data[!idx1, col1] 12 | n = length(x) 13 | w = c(x, y) 14 | z = cumsum(ifelse(order(w) <= n, 1 / n, -1 / n)) 15 | if (length(unique(w)) < 2 * n) { 16 | z = z[c(which(diff(sort(w)) != 0), 2 * n)] 17 | } 18 | max(abs(z)) 19 | } 20 | ) 21 | -------------------------------------------------------------------------------- /todo-files/repair_style.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z "$1" ] ; then 4 | echo "Argument: files to change." 5 | exit 1 6 | fi 7 | # this has a few false positives, especially within string constants.
8 | 9 | sed -i '/^ *[^ #]/s/,\([^ ]\)/, \1/g' "$@" 10 | sed -i '/^ *[^ #]/s/,\([^ ]\)/, \1/g' "$@" # do this multiple times to catch [,,1] etc 11 | sed -i '/^ *[^ #]/s/,\([^ ]\)/, \1/g' "$@" 12 | sed -i '/^ *[^ #]/s/\([^,[#]\) \+,/\1,/g' "$@" 13 | sed -i '/^ *[^ #]/s/ \+(/(/g' "$@" 14 | sed -i '/^ *[^ #]/s/\([^a-zA-Z._0-9]\)for(/\1for (/g' "$@" 15 | sed -i '/^ *[^ #]/s/\([^a-zA-Z._0-9]\)while(/\1while (/g' "$@" 16 | sed -i '/^ *[^ #]/s/\([^a-zA-Z._0-9]\)if(/\1if (/g' "$@" 17 | sed -i '/^ *[^ #]/s/\([^a-zA-Z._0-9]\)in(/\1in (/g' "$@" 18 | sed -i '/^ *[^ #]/s/\([^ !a-zA-Z0-9#:([]\)(/\1 (/g' "$@" 19 | sed -i "/^ *[^ #\"][^\"]*$/s/'/\"/g" "$@" 20 | sed -i '/^ *[^ #]/s/^\([^#(]*\)\([^#<]\)<-/\1\2=/g' "$@" 21 | -------------------------------------------------------------------------------- /todo-files/test_base_plotViperCharts.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("plotViperCharts", { 3 | skip_on_cran() 4 | skip_if_not(RCurl::url.exists("http://viper.ijs.si/api/"), message = "viper API not reachable") 5 | 6 | lrn1 = makeLearner("classif.rpart", predict.type = "prob") 7 | lrn2 = makeLearner("classif.lda", predict.type = "prob") 8 | lrns = list(lrn1, lrn2) 9 | m = train(lrn1, binaryclass.task) 10 | p = predict(m, binaryclass.task) 11 | plotViperCharts(p, browse = FALSE) 12 | 13 | br = benchmark(lrn2, binaryclass.task, resampling = makeResampleDesc("Holdout")) 14 | plotViperCharts(p, browse = FALSE) 15 | 16 | rs = lapply(lrns, holdout, task = binaryclass.task) 17 | names(rs) = c("a", "b") 18 | plotViperCharts(rs, browse = FALSE) 19 | }) 20 | -------------------------------------------------------------------------------- /todo-files/test_classif_fdashapelets.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("fda_classif_shapelets", { 3 | requirePackagesOrSkip("shapeletLib", default.method = "load") 4 | 5 | set.seed(getOption("mlr.debug.seed")) 6 | gp = getTaskData(gunpoint.task) 7 | # gp = load2("../../demo4FDA/gunpoint.RData") 8 | # df = as.data.frame(matrix(data = runif(1000), ncol = 100)) 9 | # df[,"X1"] = as.factor(sample(x = c(1,-1), replace = TRUE, size = 10)) 10 | 11 | m = shapeletLib::learnShapeletModel(data = gp[1:50, -1], label = as.factor(gp[1:50, 1])) 12 | p1 = predict(object = m, newdata = as.matrix(gp[51:200, -1])) 13 | levs = c(2, 1) 14 | p1 = as.factor(ifelse(p1 > 0, levs[2L], levs[1L])) 15 | 16 | 17 | lrn = makeLearner("fdaclassif.shapelet") 18 | task = makeFDAClassifTask(data = gp, target = "X1", positive = "1") 19 | m = try(train(lrn, task, subset = 1:50)) 20 | cp = predict(m, task, subset = 51:200) 21 | 22 | expect_equal(as.character(cp$data$response), as.character(p1)) 23 | 24 | }) 25 | -------------------------------------------------------------------------------- /todo-files/test_classif_geoDA.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_geoDA", { 3 | suppressMessages(requirePackagesOrSkip("DiscriMiner", default.method = "load")) 4 | 5 | m = DiscriMiner::geoDA(multiclass.train[, -multiclass.class.col], 6 | group = multiclass.train[, multiclass.class.col]) 7 | p = DiscriMiner::classify(m, newdata = multiclass.test[, -multiclass.class.col]) 8 | testSimple("classif.geoDA", multiclass.df, multiclass.target, multiclass.train.inds, p$pred_class) 9 | 10 | tt = function(formula, data, subset, ...) 
{ 11 | j = which(colnames(data) == as.character(formula)[2]) 12 | m = DiscriMiner::geoDA(variables = data[subset, -j], group = data[subset, j]) 13 | list(model = m, target = j) 14 | } 15 | 16 | tp = function(model, newdata) { 17 | DiscriMiner::classify(model$model, newdata = newdata[, -model$target])$pred_class 18 | } 19 | 20 | testCV("classif.geoDA", multiclass.df, multiclass.target, tune.train = tt, tune.predict = tp) 21 | }) 22 | -------------------------------------------------------------------------------- /todo-files/test_classif_linDA.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_linDA", { 3 | requirePackagesOrSkip("DiscriMiner", default.method = "load") 4 | 5 | m = DiscriMiner::linDA(multiclass.train[, -multiclass.class.col], group = multiclass.train[, multiclass.class.col]) 6 | p = DiscriMiner::classify(m, newdata = multiclass.test[, -multiclass.class.col]) 7 | testSimple("classif.linDA", multiclass.df, multiclass.target, multiclass.train.inds, p$pred_class) 8 | 9 | tt = function(formula, data, subset, ...) { 10 | j = which(colnames(data) == as.character(formula)[2]) 11 | m = DiscriMiner::linDA(variables = data[subset, -j], group = data[subset, j]) 12 | list(model = m, target = j) 13 | } 14 | 15 | tp = function(model, newdata) { 16 | DiscriMiner::classify(model$model, newdata = newdata[, -model$target])$pred_class 17 | } 18 | 19 | testCV("classif.linDA", multiclass.df, multiclass.target, tune.train = tt, tune.predict = tp) 20 | }) 21 | -------------------------------------------------------------------------------- /todo-files/test_classif_parallelForest.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_parallelForest", { 3 | requirePackages("ParallelForest", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(numboots = 5L, numvars = 2L), 8 | list(numboots = 10L, numsamps = 5L) 9 | ) 10 | 11 | # parallelForest ist not reproducible with set.seed, so we just check for createability 12 | for (i in seq_along(parset.list)) { 13 | parset = parset.list[[i]] 14 | pf.classif.lrn = try(makeLearner("classif.parallelForest", par.vals = parset, predict.type = "response")) 15 | expect_s3_class(pf.classif.lrn, "classif.parallelForest") 16 | pf.classif.m = try(train(pf.classif.lrn, binaryclass.task)) 17 | # expect_s3_class(pf.classif.m, "WrappedModel") 18 | pf.classif.p = try(predict(pf.classif.m, newdata = binaryclass.test)) 19 | expect_s3_class(pf.classif.p, c("PredictionClassif", "Prediction")) 20 | } 21 | }) 22 | -------------------------------------------------------------------------------- /todo-files/test_classif_plsDA.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_plsDA", { 3 | requirePackages("DiscriMiner", default.method = "load") 4 | set.seed(getOption("mlr.debug.seed")) 5 | m = DiscriMiner::plsDA(multiclass.train[, -multiclass.class.col], group = multiclass.train[, multiclass.class.col]) 6 | p = DiscriMiner::classify(m, newdata = multiclass.test[, -multiclass.class.col]) 7 | testSimple("classif.plsDA", multiclass.df, multiclass.target, multiclass.train.inds, p$pred_class) 8 | 9 | tt = function(formula, data, subset, ...) 
{ 10 | j = which(colnames(data) == as.character(formula)[2]) 11 | m = DiscriMiner::plsDA(variables = data[subset, -j], group = data[subset, j]) 12 | list(model = m, target = j) 13 | } 14 | 15 | tp = function(model, newdata) { 16 | DiscriMiner::classify(model$model, newdata = newdata[, -model$target])$pred_class 17 | } 18 | 19 | testCV("classif.plsDA", multiclass.df, multiclass.target, tune.train = tt, tune.predict = tp) 20 | }) 21 | -------------------------------------------------------------------------------- /todo-files/test_classif_quaDA.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_quaDA", { 3 | requirePackagesOrSkip("DiscriMiner", default.method = "load") 4 | 5 | m = DiscriMiner::quaDA(multiclass.train[, -multiclass.class.col], 6 | group = multiclass.train[, multiclass.class.col]) 7 | p = DiscriMiner::classify(m, newdata = multiclass.test[, -multiclass.class.col]) 8 | testSimple("classif.quaDA", multiclass.df, multiclass.target, 9 | multiclass.train.inds, p$pred_class) 10 | 11 | tt = function(formula, data, subset, ...) { 12 | j = which(colnames(data) == as.character(formula)[2]) 13 | m = DiscriMiner::quaDA(variables = data[subset, -j], group = data[subset, j]) 14 | list(model = m, target = j) 15 | } 16 | 17 | tp = function(model, newdata) { 18 | DiscriMiner::classify(model$model, 19 | newdata = newdata[, -model$target])$pred_class 20 | } 21 | 22 | testCV("classif.quaDA", multiclass.df, multiclass.target, tune.train = tt, 23 | tune.predict = tp) 24 | }) 25 | -------------------------------------------------------------------------------- /todo-files/test_classif_rrlda.R: -------------------------------------------------------------------------------- 1 | # 2 | # test_that("classif_rrlda", { 3 | # suppressMessages(requirePackagesOrSkip("!rrlda", default.method = "load")) 4 | # 5 | # m = rrlda::rrlda(x = multiclass.train[, -multiclass.class.col], 6 | # grouping = multiclass.train[, multiclass.target]) 7 | # p = predict(m, x = multiclass.test[, -multiclass.class.col])$class 8 | # 9 | # testSimple("classif.rrlda", multiclass.df, multiclass.target, 10 | # multiclass.train.inds, p) 11 | # }) 12 | -------------------------------------------------------------------------------- /todo-files/test_classif_wsrf.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("classif_wsrf", { 3 | requirePackages("wsrf", default.method = "load") 4 | 5 | parset.list = list( 6 | list(ntrees = 100L), 7 | list(mtry = 5L, ntrees = 100L, weights = FALSE) 8 | ) 9 | 10 | old.predicts.list = list() 11 | old.probs.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | parset = c(list(formula = binaryclass.formula, data = binaryclass.train, parallel = FALSE), parset) 16 | set.seed(getOption("mlr.debug.seed")) 17 | m = do.call(wsrf::wsrf, parset) 18 | old.predicts.list[[i]] = wsrf::predict.wsrf(m, binaryclass.test) 19 | old.probs.list[[i]] = wsrf::predict.wsrf(m, binaryclass.test, type = "prob")[, 1L] 20 | } 21 | 22 | testSimpleParsets("classif.wsrf", binaryclass.df, binaryclass.target, binaryclass.train.inds, 23 | old.predicts.list, parset.list) 24 | testProbParsets("classif.wsrf", binaryclass.df, binaryclass.target, binaryclass.train.inds, 25 | old.probs.list, parset.list) 26 | }) 27 | -------------------------------------------------------------------------------- /todo-files/test_cluster_dbscan.R: 
-------------------------------------------------------------------------------- 1 | 2 | test_that("cluster_dbscan", { 3 | 4 | parset.list = list( 5 | list() 6 | ) 7 | old.predicts.list = list() 8 | 9 | for (i in seq_along(parset.list)) { 10 | parset = parset.list[[i]] 11 | set.seed(getOption("mlr.debug.seed")) 12 | p = as.integer(predict(m, noclass.train, newdata = noclass.test)) 13 | p[p == 0] = NA 14 | old.predicts.list[[i]] = p 15 | } 16 | 17 | testSimpleParsets("cluster.dbscan", noclass.df, character(0L), noclass.train.inds, 18 | old.predicts.list, parset.list) 19 | }) 20 | -------------------------------------------------------------------------------- /todo-files/test_regr_extraTrees.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_extraTrees", { 3 | requirePackagesOrSkip("extraTrees", default.method = "load") 4 | 5 | parset.list = list( 6 | list(), 7 | list(ntree = 100L), 8 | list(ntree = 250L, mtry = 4L), 9 | list(ntree = 250L, nodesize = 2L, numRandomCuts = 2L) 10 | ) 11 | 12 | x.vars = setdiff(names(regr.num.df), regr.num.target) 13 | x.test = as.matrix(regr.num.test[, x.vars]) 14 | x.train = as.matrix(regr.num.train[, x.vars]) 15 | y = regr.num.train[, regr.num.target] 16 | 17 | old.predicts.list = list() 18 | 19 | for (i in seq_along(parset.list)) { 20 | parset = parset.list[[i]] 21 | parset = c(parset, list(x = x.train, y = y)) 22 | set.seed(getOption("mlr.debug.seed")) 23 | m = do.call(extraTrees::extraTrees, parset) 24 | old.predicts.list[[i]] = predict(m, x.test) 25 | } 26 | 27 | testSimpleParsets("regr.extraTrees", regr.num.df, regr.num.target, 28 | regr.num.train.inds, old.predicts.list, parset.list) 29 | }) 30 | -------------------------------------------------------------------------------- /todo-files/test_regr_nodeHarvest.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_nodeHarvest", { 3 | requirePackagesOrSkip("nodeHarvest", default.method = "load") 4 | 5 | parset.list = list( 6 | list(nodes = 10L), 7 | list(nodes = 10L, maxinter = 1L), 8 | list(nodes = 10L, mode = "outbag") 9 | ) 10 | 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | parset = c(parset, list(X = regr.df[regr.train.inds, -regr.class.col], 16 | Y = regr.df[regr.train.inds, regr.class.col], silent = TRUE)) 17 | set.seed(getOption("mlr.debug.seed")) 18 | m = do.call(nodeHarvest::nodeHarvest, parset) 19 | old.predicts.list[[i]] = predict(m, regr.df[-regr.train.inds, ]) 20 | } 21 | 22 | testSimpleParsets("regr.nodeHarvest", regr.df, regr.target, regr.train.inds, 23 | old.predicts.list, parset.list) 24 | }) 25 | -------------------------------------------------------------------------------- /todo-files/test_regr_randomForestSRC.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_randomForestSRC", { 3 | requirePackagesOrSkip("randomForestSRC", default.method = "load") 4 | 5 | parset.list = list( 6 | list(ntree = 10), 7 | list(ntree = 10, mtry = 5L), 8 | list(ntree = 10, nodesize = 2, na.action = "na.impute", 9 | importance = "permute", proximity = FALSE) 10 | ) 11 | old.predicts.list = list() 12 | 13 | for (i in seq_along(parset.list)) { 14 | parset = parset.list[[i]] 15 | parset = c(parset, list(data = regr.train, formula = regr.formula, 16 | forest = TRUE)) 17 | set.seed(getOption("mlr.debug.seed")) 18 | m = do.call(randomForestSRC::rfsrc, parset) 19 | # versison 2.0 of 
randomForestSRC returns an array here :( 20 | p = as.numeric(predict(m, newdata = regr.test, membership = FALSE, 21 | na.action = "na.impute")$predicted) 22 | old.predicts.list[[i]] = p 23 | } 24 | 25 | testSimpleParsets("regr.randomForestSRC", regr.df, regr.target, 26 | regr.train.inds, old.predicts.list, parset.list) 27 | }) 28 | -------------------------------------------------------------------------------- /todo-files/test_regr_xyf.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("regr_xyf", { 3 | requirePackagesOrSkip("kohonen", default.method = "load") 4 | 5 | parset.list1 = list( 6 | list(), 7 | list(grid = class::somgrid(xdim = 2L, ydim = 4L)), 8 | list(rlen = 50L) 9 | ) 10 | parset.list2 = list( 11 | list(), 12 | list(xdim = 2L, ydim = 4L), 13 | list(rlen = 50L) 14 | ) 15 | 16 | old.predicts.list = list() 17 | 18 | for (i in seq_along(parset.list1)) { 19 | pars = parset.list1[[i]] 20 | pars$data = as.matrix(regr.num.train[, -regr.num.class.col]) 21 | pars$Y = regr.num.train[, regr.num.class.col] 22 | set.seed(getOption("mlr.debug.seed")) 23 | m = do.call(kohonen::xyf, pars) 24 | p = predict(m, as.matrix(regr.num.test[, -regr.num.class.col])) 25 | old.predicts.list[[i]] = as.vector(p$prediction) 26 | } 27 | 28 | testSimpleParsets("regr.xyf", regr.num.df, regr.num.target, regr.num.train.inds, 29 | old.predicts.list, parset.list2) 30 | }) 31 | -------------------------------------------------------------------------------- /todo-files/test_tuneMIES.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("tuneMIES", { 3 | res = makeResampleDesc("Holdout") 4 | ps1 = makeParamSet( 5 | makeNumericVectorParam("cutoff", length = 2, lower = 0.001, upper = 1, trafo = function(x) as.numeric(x / sum(x))), 6 | makeIntegerParam("ntree", lower = 10, upper = 1000), 7 | makeLogicalParam("replace") 8 | ) 9 | 10 | ctrl = makeTuneControlMies(budget = 20, lambda = 5) 11 | lrn = makeLearner("classif.randomForest") 12 | tr1 = tune(lrn, binaryclass.task, res, par.set = ps1, control = ctrl) 13 | expect_equal(getOptPathLength(tr1@opt.path), 10) 14 | expect_equal(dim(as.data.frame(tr1@opt.path)), c(10, 3 + 1 + 2)) 15 | }) 16 | -------------------------------------------------------------------------------- /todo-files/tune_mies.R: -------------------------------------------------------------------------------- 1 | # tune.mies = function(learner, task, resampling, measures, par.set, control, opt.path, log.fun) { 2 | # requirePackages("mies", "tune.mies") 3 | # 4 | # g = makeTunerTargetFun(learner, task, resampling, measures, par.set, control, opt.path, log.fun, 5 | # arg.as.list=TRUE, trafo=TRUE) 6 | # 7 | # mies.ctrl = do.call("makeMiesControl", control@extra.args) 8 | # or = mies(fitn=g, par.set=par.set, control=mies.ctrl) 9 | # i = getOptPathBestIndex(opt.path, measureAggrName(measures[[1]]), ties="random") 10 | # e = getOptPathEl(opt.path, i) 11 | # new("OptResult", learner, control, e$x, e$y, opt.path) 12 | # } 13 | -------------------------------------------------------------------------------- /vignettes/img/benchmark_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/benchmark_processing.png -------------------------------------------------------------------------------- /vignettes/img/learn_task.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/learn_task.png -------------------------------------------------------------------------------- /vignettes/img/learner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/learner.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo.psd -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_blue_141x64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_blue_141x64.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_blue_566x256.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_blue_566x256.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_favicon.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_white_141x64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_white_141x64.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_white_566x256.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_white_566x256.png -------------------------------------------------------------------------------- /vignettes/img/mlrLogo_white_88x40.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlrLogo_white_88x40.png -------------------------------------------------------------------------------- /vignettes/img/mlr_logo-120x64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlr_logo-120x64.png -------------------------------------------------------------------------------- /vignettes/img/mlr_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/mlr_logo.png -------------------------------------------------------------------------------- /vignettes/img/nested_resampling.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/nested_resampling.png -------------------------------------------------------------------------------- /vignettes/img/resampling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/resampling.png -------------------------------------------------------------------------------- /vignettes/img/resampling_desc_and_instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/resampling_desc_and_instance.png -------------------------------------------------------------------------------- /vignettes/img/roc_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/roc_example.png -------------------------------------------------------------------------------- /vignettes/img/spatial_cross_validation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/spatial_cross_validation.png -------------------------------------------------------------------------------- /vignettes/img/theoretic_threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/theoretic_threshold.png -------------------------------------------------------------------------------- /vignettes/img/theoretic_weight_positive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/theoretic_weight_positive.png -------------------------------------------------------------------------------- /vignettes/img/train-basic_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/train-basic_processing.png -------------------------------------------------------------------------------- /vignettes/img/tune-varsel_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/tune-varsel_processing.png -------------------------------------------------------------------------------- /vignettes/img/variabel_selection_scheme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/variabel_selection_scheme.png -------------------------------------------------------------------------------- /vignettes/img/weight_positive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlr-org/mlr/b1e2af9f4d6573aaeaa1e1f5b1bb4782a313226c/vignettes/img/weight_positive.png 
-------------------------------------------------------------------------------- /vignettes/mlr.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "mlr: Machine Learning in R" 3 | # author: "Bernd Bischl 4 | # date: "`r Sys.Date()`" 5 | output: rmarkdown::html_vignette 6 | vignette: > 7 | %\VignetteIndexEntry{mlr} 8 | %\VignetteEngine{knitr::rmarkdown} 9 | \usepackage[utf8]{inputenc} 10 | --- 11 | 12 | ```{r, echo = FALSE, message=FALSE} 13 | library("mlr") 14 | library("BBmisc") 15 | library("ParamHelpers") 16 | 17 | # show grouped code output instead of single lines 18 | knitr::opts_chunk$set(collapse = TRUE) 19 | ``` 20 | Information on mlr is available here: 21 | 22 | * [Project Page](https://github.com/mlr-org/mlr/) 23 | * [Tutorial](https://mlr.mlr-org.com/index.html) 24 | * [Wiki](https://github.com/mlr-org/mlr/wiki) with additional information for developers 25 | --------------------------------------------------------------------------------
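
The vignette above only links out to the project documentation, so a short, hypothetical quick-start sketch may help orient readers; it is illustrative only (not part of the package or its vignettes) and uses only functions exported by mlr and visible elsewhere in this repository (makeClassifTask, makeLearner, train, predict, performance, makeResampleDesc, resample).

```r
library(mlr)

# define the task: predict Species from the iris measurements
task = makeClassifTask(data = iris, target = "Species")

# pick a learner; predict.type = "prob" requests class probabilities
lrn = makeLearner("classif.rpart", predict.type = "prob")

# train on the full data and predict back on the same task
mod = train(lrn, task)
pred = predict(mod, task = task)
performance(pred, measures = list(acc, mmce))

# resampled performance estimate via 3-fold cross-validation
rdesc = makeResampleDesc("CV", iters = 3L)
res = resample(lrn, task, rdesc, measures = acc)
res$aggr
```

The same task/learner/resampling objects feed directly into tuning (tuneParams) and benchmarking (benchmark), which is the pattern the test files above exercise.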