pipeops make parameters unavailable for tuning in mlr3proba

I am using the mlr3proba package for machine learning survival analysis. My dataset contains factor, numeric, and integer features. I use the 'scale' and 'encode' pipeops to preprocess my dataset for the deephit and deepsurv neural network methods, as in the following code:

task.mlr <- TaskSurv$new(id = "id", backend = dataset, time = time, event = status)

inner.rsmp <- rsmp("cv", folds = 5)

measure <- msr("surv.cindex")

tuner <- tnr("random_search")

terminator <- trm("evals", n_evals = 30)

deephit.learner <- lrn("surv.deephit", optimizer = "adam", epochs = 50)

nn.search_space <- ps(dropout = p_dbl(lower = 0, upper = 1), alpha = p_dbl(lower = 0, upper = 1))

deephit.learner <- po("encode") %>>% po("scale") %>>% po("learner", deephit.learner)

deephit.instance <- TuningInstanceSingleCrit$new(
  task = task.mlr,
  learner = deephit.learner,
  search_space = nn.search_space,
  resampling = inner.rsmp,
  measure = measure,
  terminator = terminator
)

tuner$optimize(deephit.instance)

But when I run the last line, it throws the following error:

Error in self$assert(xs):
   Assertion on 'xs' failed: Parameter 'dropout' not available. Did you mean 'encode.method'/'encode.affect_columns' / 'scale.center'?.

Any help is greatly appreciated.

Solution

Hi, thanks for using mlr3proba! The reason for this is that the parameter names change when the learner is wrapped in a pipeline, as you can see in the example below. There are a couple of options to solve this: you can change the parameter ids to match the new names after wrapping the learner in the PipeOps (Option 1 below), you can specify the learner's tuning ranges first and then wrap it in the PipeOps (Option 2 below), or you can use an AutoTuner and wrap that in the PipeOps (a sketch of this last option is added at the end of this answer). I use this last option in this tutorial.

library(mlr3proba)
library(mlr3)
library(paradox)
library(mlr3tuning)
library(mlr3extralearners)
library(mlr3pipelines)

task.mlr <- tsk("rats")
inner.rsmp <- rsmp("holdout")
measure <- msr("surv.cindex")
tuner <- tnr("random_search")
terminator <- trm("evals", n_evals = 2)

###########
# Option 1
###########
deephit.learner <- lrn("surv.deephit", optimizer = "adam", epochs = 50)
deephit.learner <- po("encode") %>>% po("scale") %>>% po("learner", deephit.learner)

deephit.learner$param_set$ids()
#>  [1] "encode.method"               "encode.affect_columns"      
#>  [3] "scale.center"                "scale.scale"                
#>  [5] "scale.robust"                "scale.affect_columns"       
#>  [7] "surv.deephit.frac"           "surv.deephit.cuts"          
#>  [9] "surv.deephit.cutpoints"      "surv.deephit.scheme"        
#> [11] "surv.deephit.cut_min"        "surv.deephit.num_nodes"     
#> [13] "surv.deephit.batch_norm"     "surv.deephit.dropout"       
#> [15] "surv.deephit.activation"     "surv.deephit.custom_net"    
#> [17] "surv.deephit.device"         "surv.deephit.mod_alpha"     
#> [19] "surv.deephit.sigma"          "surv.deephit.shrink"        
#> [21] "surv.deephit.optimizer"      "surv.deephit.rho"           
#> [23] "surv.deephit.eps"            "surv.deephit.lr"            
#> [25] "surv.deephit.weight_decay"   "surv.deephit.learning_rate" 
#> [27] "surv.deephit.lr_decay"       "surv.deephit.betas"         
#> [29] "surv.deephit.amsgrad"        "surv.deephit.lambd"         
#> [31] "surv.deephit.alpha"          "surv.deephit.t0"            
#> [33] "surv.deephit.momentum"       "surv.deephit.centered"      
#> [35] "surv.deephit.etas"           "surv.deephit.step_sizes"    
#> [37] "surv.deephit.dampening"      "surv.deephit.nesterov"      
#> [39] "surv.deephit.batch_size"     "surv.deephit.epochs"        
#> [41] "surv.deephit.verbose"        "surv.deephit.num_workers"   
#> [43] "surv.deephit.shuffle"        "surv.deephit.best_weights"  
#> [45] "surv.deephit.early_stopping" "surv.deephit.min_delta"     
#> [47] "surv.deephit.patience"       "surv.deephit.interpolate"   
#> [49] "surv.deephit.inter_scheme"   "surv.deephit.sub"
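
# Rather than scanning the full list, the new prefixed ids can be looked up
# programmatically with base R's grep(); the results below are simply read off
# the id listing printed above
grep("dropout", deephit.learner$param_set$ids(), value = TRUE)
#> [1] "surv.deephit.dropout"
grep("alpha", deephit.learner$param_set$ids(), value = TRUE)
#> [1] "surv.deephit.mod_alpha" "surv.deephit.alpha"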

nn.search_space <- ps(
  surv.deephit.dropout = p_dbl(lower = 0, upper = 1),
  surv.deephit.alpha = p_dbl(lower = 0, upper = 1)
)

deephit.instance <- TuningInstanceSingleCrit$new(
  task = task.mlr,
  learner = deephit.learner,
  search_space = nn.search_space,
  resampling = inner.rsmp,
  measure = measure,
  terminator = terminator
)

tuner$optimize(deephit.instance)
#> INFO  [08:15:29.770] [bbotk] Starting to optimize 2 parameter(s) with '<OptimizerRandomSearch>' and '<TerminatorEvals> [n_evals=2]' 
#> INFO  [08:15:29.841] [bbotk] Evaluating 1 configuration(s) 
#> INFO  [08:15:30.115] [mlr3]  Running benchmark with 1 resampling iterations 
#> INFO  [08:15:30.314] [mlr3]  Applying learner 'encode.scale.surv.deephit' on task 'rats' (iter 1/1) 
#> INFO  [08:15:39.997] [mlr3]  Finished benchmark 
#> INFO  [08:15:40.296] [bbotk] Result of batch 1: 
#> INFO  [08:15:40.302] [bbotk]  surv.deephit.dropout surv.deephit.alpha surv.harrell_c 
#> INFO  [08:15:40.302] [bbotk]            0.06494213          0.7109244      0.7516212 
#> INFO  [08:15:40.302] [bbotk]                                 uhash 
#> INFO  [08:15:40.302] [bbotk]  27794d84-ba46-4900-8835-de24fcda8c7f 
#> INFO  [08:15:40.307] [bbotk] Evaluating 1 configuration(s) 
#> INFO  [08:15:40.395] [mlr3]  Running benchmark with 1 resampling iterations 
#> INFO  [08:15:40.406] [mlr3]  Applying learner 'encode.scale.surv.deephit' on task 'rats' (iter 1/1) 
#> INFO  [08:15:41.807] [mlr3]  Finished benchmark 
#> INFO  [08:15:41.903] [bbotk] Result of batch 2: 
#> INFO  [08:15:41.905] [bbotk]  surv.deephit.dropout surv.deephit.alpha surv.harrell_c 
#> INFO  [08:15:41.905] [bbotk]            0.05524693          0.2895437      0.7749676 
#> INFO  [08:15:41.905] [bbotk]                                 uhash 
#> INFO  [08:15:41.905] [bbotk]  013795a3-766c-48f9-a3fe-2aae5d4cad48 
#> INFO  [08:15:41.918] [bbotk] Finished optimizing after 2 evaluation(s) 
#> INFO  [08:15:41.919] [bbotk] Result: 
#> INFO  [08:15:41.920] [bbotk]  surv.deephit.dropout surv.deephit.alpha learner_param_vals  x_domain 
#> INFO  [08:15:41.920] [bbotk]            0.05524693          0.2895437          <list[6]> <list[2]> 
#> INFO  [08:15:41.920] [bbotk]  surv.harrell_c 
#> INFO  [08:15:41.920] [bbotk]       0.7749676
#>    surv.deephit.dropout surv.deephit.alpha learner_param_vals  x_domain
#> 1:           0.05524693          0.2895437          <list[6]> <list[2]>
#>    surv.harrell_c
#> 1:      0.7749676
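
# (Sketch, not evaluated in the reprex.) Once tuning has finished, the selected
# configuration can be copied back onto the pipeline and refitted on the full task.
# 'best.learner' is only an illustrative name; 'result_learner_param_vals' is the
# field on the tuning instance holding the best configuration's parameter values
best.learner <- GraphLearner$new(deephit.learner)
best.learner$param_set$values <- deephit.instance$result_learner_param_vals
best.learner$train(task.mlr)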

###########
# Option 2
###########
deephit.learner <- lrn("surv.deephit", epochs = 50)
# set the tune tokens on the individual parameters so that epochs = 50 is kept
deephit.learner$param_set$values$dropout = to_tune(0, 1)
deephit.learner$param_set$values$alpha = to_tune(0, 1)

deephit.learner <- po("encode") %>>%
  po("scale") %>>%
  po("learner", deephit.learner)

deephit.learner = GraphLearner$new(deephit.learner)
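
# (Sketch, assuming a paradox version with to_tune()/search_space() support.)
# The tune tokens travel with the parameters when the learner is wrapped, so the
# search space generated from them already uses the prefixed ids and no manual
# renaming is needed
deephit.learner$param_set$search_space()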

tuned.deephit = tune_nested(
  method = "random_search",
  task = task.mlr,
  learner = deephit.learner,
  inner_resampling = rsmp("holdout"),
  outer_resampling = rsmp("holdout"),
  measure = msr("surv.cindex"),
  term_evals = 2
)
#> INFO  [08:15:43.167] [mlr3]  Applying learner 'encode.scale.surv.deephit.tuned' on task 'rats' (iter 1/1) 
#> INFO  [08:15:43.477] [bbotk] Starting to optimize 2 parameter(s) with '<OptimizerRandomSearch>' and '<TerminatorRunTime> [secs=2]' 
#> INFO  [08:15:43.495] [bbotk] Evaluating 1 configuration(s) 
#> INFO  [08:15:43.565] [mlr3]  Running benchmark with 1 resampling iterations 
#> INFO  [08:15:43.575] [mlr3]  Applying learner 'encode.scale.surv.deephit' on task 'rats' (iter 1/1) 
#> INFO  [08:15:44.969] [mlr3]  Finished benchmark 
#> INFO  [08:15:45.058] [bbotk] Result of batch 1: 
#> INFO  [08:15:45.064] [bbotk]  surv.deephit.dropout surv.deephit.alpha surv.harrell_c 
#> INFO  [08:15:45.064] [bbotk]             0.3492627          0.2304623      0.6745362 
#> INFO  [08:15:45.064] [bbotk]                                 uhash 
#> INFO  [08:15:45.064] [bbotk]  4ce96658-4d4a-4835-9d9f-a93398471aed 
#> INFO  [08:15:45.069] [bbotk] Evaluating 1 configuration(s) 
#> INFO  [08:15:45.127] [mlr3]  Running benchmark with 1 resampling iterations 
#> INFO  [08:15:45.136] [mlr3]  Applying learner 'encode.scale.surv.deephit' on task 'rats' (iter 1/1) 
#> INFO  [08:15:46.064] [mlr3]  Finished benchmark 
#> INFO  [08:15:46.171] [bbotk] Result of batch 2: 
#> INFO  [08:15:46.176] [bbotk]  surv.deephit.dropout surv.deephit.alpha surv.harrell_c 
#> INFO  [08:15:46.176] [bbotk]             0.1118406          0.7810053      0.6020236 
#> INFO  [08:15:46.176] [bbotk]                                 uhash 
#> INFO  [08:15:46.176] [bbotk]  6a065d27-a7e0-4e72-8e1e-6151408510cf 
#> INFO  [08:15:46.186] [bbotk] Finished optimizing after 2 evaluation(s) 
#> INFO  [08:15:46.187] [bbotk] Result: 
#> INFO  [08:15:46.191] [bbotk]  surv.deephit.dropout surv.deephit.alpha learner_param_vals  x_domain 
#> INFO  [08:15:46.191] [bbotk]             0.3492627          0.2304623          <list[4]> <list[2]> 
#> INFO  [08:15:46.191] [bbotk]  surv.harrell_c 
#> INFO  [08:15:46.191] [bbotk]       0.6745362
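
# (Not evaluated as part of the reprex.) tune_nested() returns a ResampleResult
# for the outer loop, so the unbiased performance estimate of the tuned pipeline
# is available through the usual accessors
tuned.deephit$aggregate(msr("surv.cindex"))
tuned.deephit$score(msr("surv.cindex"))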

Created on 2021-04-26 by the reprex package (v0.3.0)
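
Finally, a minimal sketch of the third option mentioned at the top of the answer: build an AutoTuner around the plain learner first, so the search space still uses the unprefixed parameter names, and only then wrap it in the PipeOps. This part was not run as part of the reprex above; the names at and graph.at are only illustrative, and the AutoTuner constructor arguments may differ slightly between mlr3tuning versions, so treat it as a starting point rather than a drop-in.

###########
# Option 3 (sketch)
###########
# Tune the plain learner, so 'dropout' and 'alpha' keep their original names
at <- AutoTuner$new(
  learner = lrn("surv.deephit", optimizer = "adam", epochs = 50),
  resampling = rsmp("holdout"),
  measure = msr("surv.cindex"),
  search_space = ps(dropout = p_dbl(lower = 0, upper = 1),
                    alpha = p_dbl(lower = 0, upper = 1)),
  terminator = trm("evals", n_evals = 2),
  tuner = tnr("random_search")
)

# Wrap the AutoTuner in the preprocessing PipeOps and use it like any other learner
graph.at <- GraphLearner$new(po("encode") %>>% po("scale") %>>% po("learner", at))
graph.at$train(task.mlr)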
