在tensorflow-2中训练cifar10数据时,我得到了以下错误。我用了这个教程。
TypeError:预期的float32传递给op‘相等’的参数'y‘,而得到'str’类型的'collections‘。错误:预期的float32,得到'str‘类型的“集合”。
我的代码看起来是:
class Mymodel(tf.keras.Model):
def __init__(self, class_size):
"""Initialize parameters and build model.
"""
super(Mymodel, self).__init__()
self.class_size =class_size
self.conv1 = tf.keras.layers.Conv2D(32, kernel_size =3, strides =2, activation='relu')
self.conv2 = tf.keras.layers.Conv2D(64, kernel_size =2, strides =2, activation='relu')
self.conv3 = tf.keras.layers.Conv2D(64, kernel_size =2, strides =1, activation='relu')
self.flat = tf.keras.layers.Flatten()
self.d1 = tf.keras.layers.Dense(512, activation='relu')
self.d2 = tf.keras.layers.Dense(128, activation='relu')
self.fd =tf.keras.layers.Dense(self.class_size, activation='softmax')
def call(self, inputs):
x = self.conv1(inputs)
x = self.conv2(x)
x = self.conv3(x)
x = self.flat(x)
x = self.d1(x)
x = self.d2(x)
return self.fd(x)
model = Mymodel(10)
train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)).shuffle(1000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(32)
# define the training and testing objects
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
loss(loss)
accuracy(labels, predictions)
tf.function
def test_step(images, labels):
predictions = model(images)
t_loss = loss_object(labels, predictions)
loss(t_loss)
accuracy(labels, predictions)
def train():
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
train()当我替换了编译和fit函数时,它就能工作了。
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, batch_size= 200, epochs=6, validation_data=(test_images, test_labels))为任何帮助拨出了大量资金。
发布于 2020-05-19 10:56:43
在损失函数中设置from_logits=True。
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) = loss_object
它解决了错误!
发布于 2020-03-20 02:14:53
我想您可以先使用input_shape param。
self.conv1 = tf.keras.layers.Conv2D(32,kernel_size =3,大步=2,激活=‘relu’,input_shape=(w,h,n_channel)
https://stackoverflow.com/questions/59077093
复制相似问题