如何解决TensorFlow中的两个独立优化器
我用TensorFlow编写了以下代码,用于一些强化学习项目:
def reset_graph(seed=42):
    """Reset the default TF graph and seed TF + NumPy RNGs for reproducibility.

    Args:
        seed: integer seed applied to both TensorFlow and NumPy.
    """
    # Indentation restored: the pasted snippet had the body at column 0,
    # which is a SyntaxError.
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
reset_graph()

n_inputs = 10
n_hidden = 8
n_outputs = 3
learning_rate = 0.0025

# Network 2. Everything is built inside its own variable scope so that its
# trainable variables can later be selected separately from network 1's.
with tf.variable_scope('net2'):
    X2 = tf.placeholder(tf.float32, shape=[None, n_inputs], name='obs2')
    initializer2 = tf.contrib.layers.variance_scaling_initializer()
    # NOTE(review): max_norm_reg is not defined in this snippet — presumably a
    # max-norm kernel regularizer defined elsewhere in the file; confirm.
    hidden2 = tf.layers.dense(X2, 10, activation=tf.nn.tanh, name='hid2',
                              kernel_initializer=initializer2,
                              kernel_regularizer=max_norm_reg)
    logits2 = tf.layers.dense(hidden2, 3, name='log2')
    outputs2 = tf.nn.softmax(logits2, name='out2')
    # Sample one action per row of logits (policy-gradient style).
    action2 = tf.multinomial(logits2, num_samples=1, name='act2')
    # The sampled action is fed back as the "label" (score-function trick).
    # Renamed from the duplicated 'cross_e1' to a unique op name.
    cross_entropy2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=action2[0], logits=logits2, name='cross_e2')

optimizer2 = tf.train.GradientDescentOptimizer(learning_rate=0.002, name='opt2')
# BUG FIX: without var_list, Optimizer.compute_gradients differentiates w.r.t.
# EVERY trainable variable in the default graph — that is what wired one
# optimizer to both networks. Restrict it to net2's variables only.
net2_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='net2')
grads_and_vars2 = optimizer2.compute_gradients(cross_entropy2,
                                               var_list=net2_vars)
gradients2 = [grad2 for grad2, variable2 in grads_and_vars2]

# Placeholders so externally modified (e.g. reward-scaled) gradients can be
# fed back in at training time. (Loop indentation restored from the paste.)
gradient_placeholders2 = []
grads_and_vars_feed2 = []
for grad2, variable2 in grads_and_vars2:
    gradient_placeholder2 = tf.placeholder(tf.float32)
    gradient_placeholders2.append(gradient_placeholder2)
    grads_and_vars_feed2.append((gradient_placeholder2, variable2))
training_op2 = optimizer2.apply_gradients(grads_and_vars_feed2)
initializer = tf.contrib.layers.variance_scaling_initializer()

# Network 1, isolated under its own variable scope (mirrors network 2).
with tf.variable_scope('net1'):
    # BUG FIX: the placeholder had no shape; tf.layers.dense needs the last
    # dimension to be known to build its kernel.
    X = tf.placeholder(tf.float32, shape=[None, n_inputs], name='obs')
    # BUG FIX: the original call omitted the mandatory `units` argument.
    # n_hidden and tanh chosen for consistency with network 2 — confirm.
    hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.tanh, name='hid1',
                             kernel_initializer=initializer,
                             kernel_regularizer=max_norm_reg)
    logits1 = tf.layers.dense(hidden, n_outputs, name='log1')
    outputs1 = tf.nn.softmax(logits1, name='out1')
    # BUG FIX: tf.multinomial requires num_samples; sample one action per row.
    action1 = tf.multinomial(logits1, num_samples=1, name='act1')
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=action1[0], logits=logits1, name='cross_e1')

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate,
                                              name='opt1')
# BUG FIX: restrict differentiation to net1's variables — without var_list,
# compute_gradients pulls in every trainable variable in the graph, which is
# why 'opt1' appeared connected to both networks.
net1_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='net1')
grads_and_vars = optimizer.compute_gradients(cross_entropy,
                                             var_list=net1_vars)
gradients = [grad for grad, variable in grads_and_vars]

# Placeholders for feeding modified gradients back at training time.
# (Loop indentation restored from the paste.)
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
    gradient_placeholder = tf.placeholder(tf.float32)
    gradient_placeholders.append(gradient_placeholder)
    grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)

init = tf.global_variables_initializer()
saver = tf.train.Saver()
我想拥有两个独立的神经网络。
问题是:当我生成计算图时,图中名为“opt1”的优化器同时连接到了两个神经网络(分别连到各自的隐藏层和 logits),但代码里并没有这样连接。结果导致我无法正确运行会话。这是 TensorFlow 的某种固有行为,还是我的代码写错了?要得到两个完全独立的图部分,我该怎么做?
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。