import numpy as np
from keras.datasets import cifar10
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as k
from keras.models import Sequential
(Xtr,Ytr),(Xte,Yte)=cifar10.load_data()
Xtr = Xtr.astype('float32')
Xte = Xte.astype('float32')
Xtr = Xtr.reshape(50000, 3072)
Xte = Xte.reshape(10000, 3072)
Ytr = np_utils.to_categorical(Ytr, 10)
Yte = np_utils.to_categorical(Yte, 10)
model=Sequential()
model.add(Dense(100, input_shape=Xtr.shape[1:]))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="sgd", metrics=['accuracy'])
model.fit(Xtr, Ytr, batch_size=200, epochs=30, shuffle=True,verbose=1)
scores = model.evaluate(Xte, Yte, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))其结果是:

我正试图在cifar-10上创建一个相当基本的2层NN。我知道数据不是经过预处理的。但这不可能是什么都学不到的原因。我在哪里搞错了?
发布于 2018-11-16 05:43:00
我将通过一个帮助你开始的例子。它应该得到大约50%的准确性。
因此,我将加载数据的代码与您的代码保持相同。唯一的区别是,我将数据规范化为0到1之间,这通常是为了更紧密地绑定权重。
import numpy as np
from keras.datasets import cifar10
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as k
from keras.models import Sequential
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras import backend as K
(x_train, y_train), (x_test, y_test)=cifar10.load_data()从https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 170500096/170498071 ============================== - 71s 0us/step下载数据
(x_train, y_train), (x_test, y_test)=cifar10.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape(50000, 3072)
x_test = x_test.reshape(10000, 3072)
# The known number of output classes.
num_classes = 10
# Channels go last for TensorFlow backend
x_train_reshaped = x_train.reshape(x_train.shape[0], x_train.shape[1],)
x_test_reshaped = x_test.reshape(x_test.shape[0], x_test.shape[1],)
input_shape = (x_train.shape[1],)
# Convert class vectors to binary class matrices. This uses 1 hot encoding.
y_train_binary = keras.utils.to_categorical(y_train, num_classes)
y_test_binary = keras.utils.to_categorical(y_test, num_classes)现在让我们做我们的模型。在这里,我确实使隐藏层每层有更多的神经元。
model = Sequential()
model.add(Dense(32,
activation='relu',
input_shape=input_shape))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])现在我们可以训练模型了
epochs = 4
batch_size = 128
# Fit the model weights.
model.fit(x_train_reshaped, y_train_binary,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test_reshaped, y_test_binary))50000/50000 ============================== - 3s 58 3s/步进损失: 1.6716 - acc: 0.4038 - val_loss: 1.6566 - val_acc: 0.4094
https://datascience.stackexchange.com/questions/41278
复制相似问题