How to use a data generator with a Keras VAE
I am currently trying to implement a variational autoencoder, but I am confused about how to use a data generator in Keras. So far I have:
"""class Sampling(layers.Layer):
def call(self,inputs):
z_mean,z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch,dim))
return z_mean + tf.exp(z_log_var / 2) * epsilon
class factor_vae(keras.Model):
def __init__(self):
super(factor_vae,self).__init__()
self.encoder = self.encoder_factor_vae()
self.decoder = self.decoder_factor_vae()
self.classifier = self.MLP_classifier()
def train_step(self,data):
data = data[0]
with tf.GradientTape() as tape:
z,z_mean,z_log_var = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
keras.losses.mse(data,reconstruction))
reconstruction_loss *= 4096 #denna kan ändras
kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
kl_loss = tf.reduce_mean(kl_loss)
kl_loss *= -0.5
total_loss = reconstruction_loss + (kl_loss)
grads = tape.gradient(total_loss,self.trainable_weights)
self.optimizer.apply_gradients(zip(grads,self.trainable_weights))
return {
"loss": total_loss,"reconstruction_loss": reconstruction_loss,"kl_loss": kl_loss,}
def encoder_factor_vae(self):
x_inp = Input(shape=(64,64,1))
z = layers.Conv2D(filters=32,kernel_size=(4,4),activation="relu",strides=2,padding="same")(x_inp)
z = BatchNormalization()(z)
z = layers.Conv2D(filters=32,padding="same")(z)
z = BatchNormalization()(z)
z = layers.Conv2D(filters=64,padding="same")(z)
z = BatchNormalization()(z)
z = layers.Flatten()(z)
z = Dense(units=128,activation='relu')(z)
z = BatchNormalization()(z)
z_mean = Dense(units=10,activation='relu')(z) # här tror jag samplingen ska ske
z_log_var = Dense(units=10,activation='sigmoid')(z) # bör vara sampling från reparameterizationen
z = Sampling()([z_mean,z_log_var])
encoder = keras.Model(x_inp,[z,z_log_var],name="encoder")
encoder.summary()
return encoder
def decoder_factor_vae(self):
z_inp = Input(shape=(10,))
x_rec = Dense(units=128,activation='relu')(z_inp)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=1024,activation='relu')(x_rec) #hit fungerar
x_rec = BatchNormalization()(x_rec)
x_rec = layers.Reshape((4,4,64))(x_rec)
x_rec = layers.Conv2DTranspose(filters=64,activation='relu',padding='same')(
x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = layers.Conv2DTranspose(filters=32,padding='same')(
x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = layers.Conv2DTranspose(filters=1,padding='same')(
x_rec)
decoder = keras.Model(z_inp,x_rec,name="decoder") # går att skicka in vilken batchsize som helst
decoder.summary()
return decoder
def MLP_classifier(self):
z_inp = Input(shape=(10,))
x_rec = Dense(units=1000)(z_inp) #1
x_rec = LeakyReLU(alpha=0.3)(x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=1000)(x_rec) #2
x_rec = LeakyReLU(alpha=0.3)(x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=1000)(x_rec) # 3
x_rec = LeakyReLU(alpha=0.3)(x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=1000)(x_rec) # 4
x_rec = LeakyReLU(alpha=0.3)(x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=1000)(x_rec) # 5
x_rec = LeakyReLU(alpha=0.3)(x_rec)
x_rec = BatchNormalization()(x_rec)
x_rec = Dense(units=2)(x_rec) # 6
classifier = keras.Model(z_inp,name="clasifier")
return classifier
def generate_batches(data):
L = 50
start = 0
end = start + L
y_L_real = np.zeros((L,2))
y_L_fake = np.zeros((L,2))
y_L_real[:,0] = 1
y_L_fake[:,1] = 1
#total_y = np.vstack((y_L_real,y_L_fake))
while True:
x_L_real = data[start:end] #antalet värden är 2xL
x_L_fake = np.roll(x_L_real,shift=2,axis=0)
total_x = np.vstack((x_L_real,x_L_fake))
start += L
end += L
if start >= data.shape[0]:
start = 0
end = L
yield total_x,total_x
"""
data = dsprite()
factor = factor_vae()
xyz = np.load("C:\\Users\\joaki\\OneDrive\\Skrivbord\\images\\dsprites_ndarray_"
"co1sh3sc6or40x32y32_64x64.npz")
test_data = xyz['imgs']
train_steps = 3000
steps_epoch = 300
factor.compile(optimizer=keras.optimizers.Adam(0.001))
train_generator = generate_batches(test_data)
factor.fit_generator(train_generator,steps_per_epoch=steps_epoch,epochs=50)"""
There is a lot of code, but everything works fine as long as I train on the entire dataset at once. As soon as I try to use the implemented train_generator, it crashes with the error message:

NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.

So I know there is a problem related to the train_generator, but I do not understand what I am missing. Can someone give me more information?
Solution
Try reading this issue thread; it seems you should implement a call method in your class when subclassing Model:
https://github.com/tensorflow/tensorflow/issues/43173
Although all subclasses of keras.Model are supposed to implement call, it is missing from several of the Keras examples (see here or here). In some cases this raises the error "When subclassing the Model class, you should implement a call method."
I ran into this problem when including a DataGenerator (derived from keras.utils.Sequence) and solved it by implementing call() like this:
Autoencoder
...
def call(self, inputs, training=None, mask=None):
    z = self.encoder(inputs=inputs, training=training, mask=mask)
    return self.decoder(z)
...
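Applied to the factor_vae from the question, a minimal call() could look like the sketch below. It assumes the encoder outputs [z, z_mean, z_log_var] (as in the corrected code above) and ignores the classifier, since train_step never uses it:

class factor_vae(keras.Model):
    ...
    def call(self, inputs, training=None, mask=None):
        # Encode, then reconstruct from the sampled latent vector; the
        # mean and log-variance are only needed for the losses in train_step.
        z, z_mean, z_log_var = self.encoder(inputs, training=training)
        return self.decoder(z, training=training)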
GAN
...
def call(self, inputs, training=None, mask=None):
    batch_size = tf.shape(inputs)[0]
    random_latent_vector = tf.random.normal(shape=(batch_size, self.latent_dim))
    x = self.generator(inputs=random_latent_vector, mask=mask)
    if len(x.shape) != len(inputs.shape):
        raise Exception(f'Fake signal ({x.shape}) and real signal ({inputs.shape}) do not have same shape dimension')
    return self.critic(inputs=x, mask=mask)
...
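For reference, a minimal DataGenerator derived from keras.utils.Sequence could look like the sketch below; the class and its parameters are illustrative placeholders, not the exact code referred to above. It yields (x, x) pairs for an autoencoder:

import numpy as np
from tensorflow import keras

class DataGenerator(keras.utils.Sequence):
    def __init__(self, x_set, batch_size=100):
        self.x = x_set
        self.batch_size = batch_size

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        # one batch of (inputs, targets)
        batch = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
        return batch, batch

A Sequence can be passed directly to fit, e.g. model.fit(DataGenerator(test_data), epochs=50); unlike a plain Python generator, it does not need steps_per_epoch, because the number of batches comes from __len__.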
This seems to be a known issue (see here).
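As an aside, the plain Python generator from the question can also be wrapped in a tf.data.Dataset, which gives Keras the shape and dtype information up front. A sketch, assuming TF 2.4+ (for output_signature) and the (None, 64, 64, 1) float32 batches produced by generate_batches above:

import tensorflow as tf

dataset = tf.data.Dataset.from_generator(
    lambda: generate_batches(test_data),
    output_signature=(
        tf.TensorSpec(shape=(None, 64, 64, 1), dtype=tf.float32),
        tf.TensorSpec(shape=(None, 64, 64, 1), dtype=tf.float32),
    ),
)
factor.fit(dataset, steps_per_epoch=steps_epoch, epochs=50)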