How to fix the modified TensorBoard from Sentdex's tutorial in TF 2.0
I tried to use the ModifiedTensorBoard class from Daniel's tutorial, written from this.
I found this alternative solution, but when I try it I get the errors below.
class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.create_file_writer(self.log_dir)
        self._log_write_dir = os.path.join(self.log_dir, MODEL_NAME)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overridden, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overridden
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overridden, so won't close writer
    def on_train_end(self, _):
        pass

    def on_train_batch_end(self, logs=None):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=index)
                self.step += 1
                self.writer.flush()
In the _write_logs method, I get this error:

Context manager 'generator' doesn't implement __enter__ and __exit__. pylint(not-context-manager)
I also get an error saying my ModifiedTensorBoard has no _train_step attribute:

'ModifiedTensorBoard' object has no attribute '_train_step'

Has anyone had the same problem?
Solution

I also ran into some errors, but unfortunately that was a few months ago... You could try something like this.
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
import os


class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.create_file_writer(self.log_dir)

    # Overriding this method to set up the log dirs and step counters ourselves;
    # TF 2.x expects _train_step/_val_step to exist, which is what causes the
    # "'ModifiedTensorBoard' object has no attribute '_train_step'" error
    def set_model(self, model):
        self.model = model
        self._log_write_dir = self.log_dir
        self._train_dir = os.path.join(self._log_write_dir, 'train')
        self._train_step = self.model._train_counter
        self._val_dir = os.path.join(self._log_write_dir, 'validation')
        self._val_step = self.model._test_counter
        self._should_write_train_graph = False

    # Overridden, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overridden
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overridden, so won't close writer
    def on_train_end(self, _):
        pass

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=index)

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
Hope this helps.
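For reference, here is a minimal, hypothetical sketch of how this callback is typically wired into the tutorial's DQN-style training loop. The MODEL_NAME constant, the toy model, and the dummy data are assumptions for illustration only; note that this version of the class never increments self.step itself, so the training loop has to set tensorboard.step each episode.

import time
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

MODEL_NAME = 'dqn_example'  # hypothetical name, not from the original answer

# One writer/log dir for the whole run
tensorboard = ModifiedTensorBoard(log_dir=f'logs/{MODEL_NAME}-{int(time.time())}')

# Toy stand-in for the tutorial's DQN model
model = Sequential([Dense(16, activation='relu', input_shape=(4,)),
                    Dense(2, activation='linear')])
model.compile(optimizer='adam', loss='mse')

# Dummy minibatch standing in for samples from replay memory
X = np.random.random((32, 4)).astype('float32')
y = np.random.random((32, 2)).astype('float32')

for episode in range(1, 11):
    tensorboard.step = episode  # all logs from this .fit() land on this step
    model.fit(X, y, batch_size=32, verbose=0, shuffle=False,
              callbacks=[tensorboard])
    # Custom per-episode metrics can also be written directly
    tensorboard.update_stats(reward_avg=float(np.random.random()),
                             epsilon=max(0.01, 1.0 - episode * 0.1))

As for the first error: the pylint not-context-manager message appears to be a static-analysis false positive (pylint cannot tell that tf.summary.create_file_writer returns a context manager); the with self.writer.as_default(): block runs fine at runtime.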