我正在使用 tf.GradientTape 计算鉴别器的梯度。鉴别器模型包含自定义的层和滤波器,但算出的梯度全为零;生成器的梯度则计算正常。模型处理的是音频数据,自定义滤波器以 tf.Variable 初始化,且 trainable=True。
以下是鉴别器的代码:
def discriminator(images_from_before, filters):
#Discriminator
#3x1 Convolution
conv_3 = tf.nn.conv1d(input = images_from_before,
filters = filters.filters[0][0],
stride = 1,
padding = 'SAME',
data_format = 'NWC')
#9x1 Convolution
conv_9 = tf.nn.conv1d(input = images_from_before,
filters = filters.filters[0][1],
stride = 1,
padding = 'SAME',
data_format = 'NWC')
#27x1 Convolution
conv_27 = tf.nn.conv1d(input = images_from_before,
filters = filters.filters[0][2], #some of the whole tfVariable
stride = 1,
padding = 'SAME',
data_format = 'NWC')
#81x1 Convolution
conv_81 = tf.nn.conv1d(input = images_from_before,
filters = filters.filters[0][3], #some of the whole tfVariable
stride = 1,
padding = 'SAME',
data_format = 'NWC')
out = tf.concat([conv_3,conv_9,conv_27,conv_81], 2)
out = leaky_relu(out,0.2)
#7x Discriminator block
for i in range(7):
out = discriminator_block(out,filters.filters[i+1],filters.BN_val[i])
#Flatten width of the out tensor
mul_FC = out.shape[1] * out.shape[2]
#FC - Dropout - LeakyReLU
#Flatten out
out = tf.reshape(out, shape = [out.shape[0],mul_FC])
out = FClayer(out,filters.FC1, filters.bias1)
out = Dropout(out)
out = leaky_relu(out,0.2)
#FC - Sigmoid
out = FClayer(out, filters.FC2, filters.bias2)
out = tf.math.sigmoid(out)
#implicit mean over the minibatch samples
out = tf.math.reduce_mean(out)
out = tf.clip_by_value(out,0.1, 0.9)
return out梯度的计算方法如下
with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
gen_out = generator.generator(downsampled, num_of_blocks, gen_var)
gen_dis = discriminator.discriminator(gen_out,dis_var)
#compute losses
gen_loss = losses.generator_loss(
losses.L2_loss(upsampled, gen_out),
losses.Lf_loss(upsampled, gen_out,auto_var,4),
losses.Ladv_loss(gen_dis,dis_var), #updated
lamda_f,
lamda_adv)
dis_loss = losses.discriminator_loss(0.1,
gen_dis)
print('Gen loss: {}'.format(gen_loss.numpy()))
print('Dis loss: {}'.format(dis_loss.numpy()))
gen_grads = gen_tape.gradient(gen_loss, [gen_var.dfilters,
gen_var.ufilters,
gen_var.finalfilter,
gen_var.prelu])
disc_grads = dis_tape.gradient(dis_loss, [dis_var.filters,
dis_var.BN_val,
dis_var.FC1,
dis_var.bias1,
dis_var.FC2,
dis_var.bias2])发布于 2021-06-29 05:16:52
只有当需要对同一个 GradientTape 多次调用 gradient() 时,才必须在创建时传入 persistent=True(非持久化的 tape 在第一次调用 gradient() 之后就会释放其记录的资源);如果每个 tape 只调用一次 gradient(),则不需要该参数。
https://stackoverflow.com/questions/62952341
复制相似问题