我试图调整一个在别处找到的 Keras 模型,使其适合我的数据。我很想加入一个 call 函数,以便能把验证数据集成进来、用于监控性能。然而,我不知道如何把根据数据计算出的各项损失传递给它。
下面是我的代码当前的样子:
class VAE(keras.Model):
def __init__(self, encoder, decoder, **kwargs):
super(VAE, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.total_loss_tracker = keras.metrics.Mean(name="loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.kl_loss_tracker,
]
def train_step(self, data):
if isinstance(data, tuple):
data = data[0]
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
## BASE RECONSTRUCTION LOSS:
reconstruction_loss = tf.reduce_mean( keras.losses.binary_crossentropy(data, reconstruction) )
## ELBO RECONSTRUCTION LOSS:
# reconstruction_loss = tf.reduce_mean( keras.backend.sum(keras.backend.binary_crossentropy(data, reconstruction), axis=-1) )
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
## BASE TOTAL LOSS:
total_loss = reconstruction_loss + kl_loss
## WEIGHTED TOTAL LOSS: try to increase importance of reconstruction loss
# total_loss = reconstruction_loss + 0.1*kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.kl_loss_tracker.update_state(kl_loss)
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"kl_loss": self.kl_loss_tracker.result(),
}
def call(self, data):
## TENTATIVE CALL FUNCTION FOR VALIDATION DATA
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean( keras.losses.binary_crossentropy(data, reconstruction) )
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
self.add_loss(reconstruction_loss)
self.add_loss(kl_loss)
self.add_loss(total_loss)
return reconstructionself.add_loss()来自TF指南的此页,但是在培训期间,日志显示了所有验证损失的0.0。
我应该使用另一个度量和跟踪器并更新这些吗?
发布于 2022-07-26 12:55:59
就我个人而言,当我刚开始学习如何使用 keras.Model 时,keras 的一切对我来说都像天书。但是,TF 文档的这一页把它解释得非常清楚,尤其是其中对 test_step 作用的说明——这正是你要找的东西:
class VAE(keras.Model):
def __init__(self, encoder, decoder, **kwargs):
super(VAE, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.kl_loss_tracker,
]
def train_step(self, data):
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)
)
)
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.kl_loss_tracker.update_state(kl_loss)
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"kl_loss": self.kl_loss_tracker.result(),
}
def test_step(self, data):
## TENTATIVE CALL FUNCTION FOR VALIDATION DATA
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean( keras.losses.binary_crossentropy(data, reconstruction) )
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
return {
"loss": total_loss,
"reconstruction_loss": reconstruction_loss,
"kl_loss": kl_loss,
}我可以看到这个输出:
Epoch 1/30
438/438 [==============================] - 7s 15ms/step - loss: 147.3851 - reconstruction_loss: 141.3100 - kl_loss: 6.2865 - val_loss: 6.6573 - val_reconstruction_loss: 0.1790 - val_kl_loss: 6.4783
我觉得这正是你要找的。
https://stackoverflow.com/questions/73123467
复制相似问题