ValueError when training a YOLO_v3 model

How can I fix this ValueError when training the YOLO_v3 model?

While training the model on Google Colab I get the error below; it may be a problem with the Adam optimizer or its dependencies.

ValueError: Tensor conversion requested dtype float32_ref for Tensor with dtype float32:

Is there any way to resolve this?
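
For reference, the error appears to be raised at the model.compile(optimizer=Adam(lr=1e-3), ...) step further down in the script. Below is a minimal sketch that isolates that call with a purely hypothetical toy model in place of the YOLO graph (assuming the same standalone keras with a TensorFlow 1.x backend):

import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam

# Tiny stand-in for the YOLO graph: a Lambda layer named 'yolo_loss' whose output
# plays the role of the precomputed loss tensor.
inp = Input(shape=(4,))
loss_out = Lambda(lambda x: K.sum(x), name='yolo_loss')(inp)
toy_model = Model(inp, loss_out)

# Same compile pattern as in the training script; with a mismatched keras/tensorflow
# combination this is where the float32_ref vs float32 conversion error can show up.
toy_model.compile(optimizer=Adam(lr=1e-3),
                  loss={'yolo_loss': lambda y_true, y_pred: y_pred})

The float32_ref vs float32 mismatch usually depends on the installed keras and tensorflow versions rather than on the training data itself.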

Code of the training script:

import os
import sys
import argparse

def get_parent_dir(n=1):
    """ returns the n-th parent dicrectory of the current
    working directory """
    current_path = os.path.dirname(os.path.abspath(__file__))
    for k in range(n):
        current_path = os.path.dirname(current_path)
    return current_path
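
# Example with a hypothetical layout: if this script lives in
# /content/TrainYourOwnYOLO/2_Training/Train_YOLO.py, get_parent_dir(0) returns
# /content/TrainYourOwnYOLO/2_Training and get_parent_dir(1) returns /content/TrainYourOwnYOLO.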

src_path = os.path.join(get_parent_dir(0),'src')
sys.path.append(src_path)

utils_path = os.path.join(get_parent_dir(1),'Utils')
sys.path.append(utils_path)

import numpy as np
import keras.backend as K
from keras.layers import Input,Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard,ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
from keras_yolo3.yolo3.model import preprocess_true_boxes,yolo_body,tiny_yolo_body,yolo_loss
from keras_yolo3.yolo3.utils import get_random_data
from PIL import Image
from time import time
import pickle

from Train_Utils import get_classes,get_anchors,create_model,create_tiny_model,data_generator,data_generator_wrapper,ChangeToOtherMachine


keras_path = os.path.join(src_path,'keras_yolo3')
Data_Folder = os.path.join(get_parent_dir(1),'Data')
Image_Folder = os.path.join(Data_Folder,'Source_Images','Training_Images')
VoTT_Folder = os.path.join(Image_Folder,'vott-csv-export')
YOLO_filename = os.path.join(VoTT_Folder,'data_train.txt')

Model_Folder = os.path.join(Data_Folder,'Model_Weights')
YOLO_classname = os.path.join(Model_Folder,'data_classes.txt')

log_dir = Model_Folder
anchors_path = os.path.join(keras_path,'model_data','yolo_anchors.txt') 
weights_path = os.path.join(keras_path,'yolo.h5') 

FLAGS = None

if __name__ == '__main__':
    # Delete all default flags
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''

    parser.add_argument(
        "--annotation_file",type=str,default=YOLO_filename,help = "Path to annotation file for Yolo. Default is "+ YOLO_filename
        )
    parser.add_argument(
        "--classes_file",default=YOLO_classname,help = "Path to YOLO classnames. Default is "+ YOLO_classname
        )

    parser.add_argument(
        "--log_dir",default=log_dir,help = "Folder to save training logs and trained weights to. Default is "+ log_dir 
        )

    parser.add_argument(
        "--anchors_path",default=anchors_path,help = "Path to YOLO anchors. Default is "+ anchors_path
        )

    parser.add_argument(
        "--weights_path",default=weights_path,help = "Path to pre-trained YOLO weights. Default is "+ weights_path
        )
    parser.add_argument(
        "--val_split",type=float,default=0.1,help = "Percentage of training set to be used for validation. Default is 10%."
        )
    parser.add_argument(
        "--is_tiny",default=False,action="store_true",help = "Use the tiny Yolo version for better performance and less accuracy. Default is False."
        )
    parser.add_argument(
        "--random_seed",default=None,help = "Random seed value to make script deterministic. Default is 'None',i.e. non-deterministic."
        )
    parser.add_argument(
        "--epochs",default=51,help = "Number of epochs for training last layers and number of epochs for fine-tuning layers. Default is 51."
        )

    
    FLAGS = parser.parse_args()

    np.random.seed(FLAGS.random_seed)

    log_dir = FLAGS.log_dir

    class_names = get_classes(FLAGS.classes_file)
    num_classes = len(class_names)
    anchors = get_anchors(FLAGS.anchors_path)
    weights_path = FLAGS.weights_path

    input_shape = (416,416) # multiple of 32,height,width
    epoch1,epoch2 = FLAGS.epochs,FLAGS.epochs
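    # epoch1 covers the first stage (frozen backbone), epoch2 the fine-tuning stage;
    # stage 2 below continues from initial_epoch=epoch1 up to epoch1+epoch2.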

    is_tiny_version = (len(anchors)==6) # default setting
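    # The standard yolo_anchors.txt defines 9 anchor boxes while the tiny variant uses 6,
    # which is what the length check above detects.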
    if FLAGS.is_tiny:
        model = create_tiny_model(input_shape,anchors,num_classes,freeze_body=2,weights_path = weights_path)
    else:
        model = create_model(input_shape,anchors,num_classes,freeze_body=2,weights_path = weights_path) # make sure you know what you freeze

    log_dir_time = os.path.join(log_dir,'{}'.format(int(time())))
    logging = TensorBoard(log_dir=log_dir_time)
    checkpoint = ModelCheckpoint(os.path.join(log_dir,'checkpoint.h5'),monitor='val_loss',save_weights_only=True,save_best_only=True,period=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',factor=0.1,patience=3,verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',min_delta=0,patience=10,verbose=1)

    val_split = FLAGS.val_split
    with open(FLAGS.annotation_file) as f:
        lines = f.readlines()

    # This step makes sure that the path names correspond to the local machine 
    # This is important if annotation and training are done on different machines (e.g. training on AWS)
    lines  = ChangeToOtherMachine(lines,remote_machine = '')
    np.random.shuffle(lines)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
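    # e.g. (hypothetical numbers) 1000 annotation lines with val_split=0.1 give
    # num_val = 100 and num_train = 900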

    # Train with frozen layers first,to get a stable loss. 
    # Adjust num epochs to your dataset. This step is enough to obtain a decent model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3),loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true,y_pred: y_pred})
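        # Note: in keras_yolo3 the model's output is already the scalar loss produced by a
        # Lambda layer named 'yolo_loss', so the compiled loss simply passes y_pred through.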

        batch_size = 32
        print('Train on {} samples,val on {} samples,with batch size {}.'.format(num_train,num_val,batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train],batch_size,input_shape,anchors,num_classes),
            steps_per_epoch=max(1,num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],batch_size,input_shape,anchors,num_classes),
            validation_steps=max(1,num_val//batch_size),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=[logging,checkpoint])
        model.save_weights(os.path.join(log_dir,'trained_weights_stage_1.h5'))

        step1_train_loss = history.history['loss']
        
        with open(os.path.join(log_dir_time,'step1_loss.npy'),'w') as f:
            for item in step1_train_loss:
                f.write("%s\n" % item)
        
        step1_val_loss = np.array(history.history['val_loss'])
        
        with open(os.path.join(log_dir_time,'step1_val_loss.npy'),'w') as f:
            for item in step1_val_loss:
                f.write("%s\n" % item)
        
    # Unfreeze and continue training,to fine-tune.
    # Train longer if the result is unsatisfactory.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),loss={'yolo_loss': lambda y_true,y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all layers.')

        batch_size = 4 # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples,val on {} samples,with batch size {}.'.format(num_train,num_val,batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train],batch_size,input_shape,anchors,num_classes),
            steps_per_epoch=max(1,num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],batch_size,input_shape,anchors,num_classes),
            validation_steps=max(1,num_val//batch_size),
            epochs=epoch1+epoch2,
            initial_epoch=epoch1,
            callbacks=[logging,checkpoint,reduce_lr,early_stopping])
        model.save_weights(os.path.join(log_dir,'trained_weights_final.h5'))
        step2_train_loss = history.history['loss']
        
        with open(os.path.join(log_dir_time,'step2_loss.npy'),'w') as f:
            for item in step2_train_loss:
                f.write("%s\n" % item)
        
        step2_val_loss = np.array(history.history['val_loss'])
        
        with open(os.path.join(log_dir_time,'step2_val_loss.npy'),'w') as f:
            for item in step2_val_loss:
                f.write("%s\n" % item)
