I came across the scikit-optimize package, and I am relatively new to Bayesian optimization. I would like to use it to find the best hyperparameters for my current convolutional neural network. However, my attempt to do this with Bayesian optimization is not working correctly.
I have tried to implement this, but my code does not run properly, and I cannot tell which part of it is still wrong. Can anyone tell me how to fix it? Is there an efficient way to apply Bayesian optimization to a convolutional neural network in order to find its best hyperparameters? Any ideas?
Update
I tried GridSearchCV and RandomizedSearchCV on my convolutional neural network, which is quite deep, and GridSearchCV took far too long: even after 2-3 days the optimization could not finish. I want to use a newer optimization framework such as Bayesian optimization (i.e. skopt or optuna) to find the best parameters and hyperparameters of the CNN. Can anyone suggest possible fixes and an efficient approach for attempt 1 and my second attempt in the Colab notebook below? Any ideas?
My current attempt
Below is my current attempt at Bayesian optimization with the scikit-optimize package. This is the code from the Colab notebook where I ran all my experiments applying Bayesian optimization to the CNN to find its best hyperparameters:
### function returned to Bayesian Optimization
# (inputs, cnn(), generate_batches(), input_size and randomState are defined earlier in the notebook)
@use_named_args(dimensions=dimensions)
def bayes_opt(cnn_num_steps, cnn_init_epoch, cnn_max_epoch,
              cnn_learning_rate_decay, cnn_batch_size, cnn_dropout_rate, cnn_init_learning_rate):
    global iteration, num_steps, init_epoch, max_epoch, learning_rate_decay, dropout_rate, init_learning_rate, batch_size

    num_steps = np.int32(cnn_num_steps)
    batch_size = np.int32(cnn_batch_size)
    learning_rate_decay = np.float32(cnn_learning_rate_decay)
    init_epoch = np.int32(cnn_init_epoch)
    max_epoch = np.int32(cnn_max_epoch)
    dropout_rate = np.float32(cnn_dropout_rate)
    init_learning_rate = np.float32(cnn_init_learning_rate)

    tf.reset_default_graph()
    tf.set_random_seed(randomState)
    sess = tf.Session()

    (train_X, train_y), (test_X, test_y) = cifar10.load_data()
    train_X = train_X.astype('float32') / 255.0
    test_X = test_X.astype('float32') / 255.0

    targets = tf.placeholder(tf.float32, [None, input_size], name="targets")
    model_learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
    model_dropout_rate = tf.placeholder_with_default(0.0, shape=())
    global_step = tf.Variable(0, trainable=False)

    prediction = cnn(model_dropout_rate, model_learning_rate)

    model_learning_rate = tf.train.exponential_decay(learning_rate=model_learning_rate,
                                                     global_step=global_step,
                                                     decay_rate=learning_rate_decay,
                                                     decay_steps=init_epoch, staircase=False)

    with tf.name_scope('loss'):
        model_loss = tf.losses.mean_squared_error(targets, prediction)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(model_learning_rate).minimize(model_loss, global_step=global_step)

    sess.run(tf.global_variables_initializer())

    for epoch_step in range(max_epoch):
        for batch_X, batch_y in generate_batches(train_X, train_y, batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                model_learning_rate: init_learning_rate,
                model_dropout_rate: dropout_rate
            }
            sess.run(train_step, train_data_feed)

    ## how to return validation error, any idea?
    ## return validation error
    ## return val_error
My current attempt in the Colab notebook still has various problems, and it is not finished yet. The skopt wiring I use to launch this search is sketched below. Can anyone suggest a workable approach for finding the optimal hyperparameters of a very deep convolutional neural network with Bayesian optimization? Any ideas? Thanks!
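This is roughly how the search space and the gp_minimize call are set up (the bounds here are placeholders, not my exact values). Since gp_minimize minimizes the returned value, bayes_opt is expected to return the validation error:
from skopt import gp_minimize
from skopt.space import Real, Integer
from skopt.utils import use_named_args

# Search dimensions; the names must match the keyword arguments of bayes_opt.
# Bounds below are illustrative placeholders.
dimensions = [
    Integer(low=1, high=10, name='cnn_num_steps'),
    Integer(low=1, high=5, name='cnn_init_epoch'),
    Integer(low=10, high=60, name='cnn_max_epoch'),
    Real(low=0.7, high=0.99, name='cnn_learning_rate_decay'),
    Integer(low=32, high=128, name='cnn_batch_size'),
    Real(low=0.1, high=0.6, name='cnn_dropout_rate'),
    Real(low=1e-4, high=1e-1, prior='log-uniform', name='cnn_init_learning_rate'),
]

# gp_minimize *minimizes* the objective, so bayes_opt should return the
# validation error (lower is better).
search_result = gp_minimize(func=bayes_opt,
                            dimensions=dimensions,
                            acq_func='EI',   # expected improvement
                            n_calls=30,
                            random_state=randomState)
print(search_result.x)  # best hyperparameter values found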
Posted on 2020-08-14 19:12:28
I suggest you use the Keras Tuner package for Bayesian Optimization.
Below is just a small example of how this can be done.
from kerastuner import HyperModel, Objective
import tensorflow as tf
from kerastuner.tuners import BayesianOptimization

# Create the keras tuner model.
# (tokenizer, embedding_dim and the f1 metric come from my own notebook)
class MyHyperModel(HyperModel):

    def build(self, hp):
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Embedding(len(tokenizer.word_index) + 1, embedding_dim))
        for i in range(hp.Int('num_layers', 1, 3)):
            model.add(tf.keras.layers.Conv1D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                             activation='relu',
                                             kernel_size=3,
                                             bias_initializer='glorot_uniform'))
            model.add(tf.keras.layers.MaxPool1D())
        model.add(tf.keras.layers.GlobalMaxPool1D())
        for i in range(hp.Int('num_layers_rnn', 1, 3)):
            model.add(tf.keras.layers.Dense(units=hp.Int('units', min_value=32, max_value=512, step=32), activation='relu'))
            model.add(tf.keras.layers.Dropout(0.2))
        model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
        model.compile(
            optimizer=hp.Choice('optimizer', values=['Adam', 'Adadelta', 'Adamax']),
            loss='binary_crossentropy',
            metrics=[f1])
        return model
Then, once it is created, you can start training the model with the code below.
hypermodel = MyHyperModel()

tuner = BayesianOptimization(
    hypermodel,
    objective=Objective('val_f1', direction="max"),
    num_initial_points=50,   # note: with max_trials=15 all trials come from the random initial points
    max_trials=15,
    directory='./',
    project_name='real_or_not')

tuner.search(train_dataset,
             epochs=10, validation_data=validation_dataset)
You can check the documentation at this link. I have also attached a link to a Kaggle notebook demonstrating Bayesian Optimization that I wrote myself, so that you can try a working example. Feel free to ask any further questions.
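Once the search finishes, you can pull the best configuration and the best model out of the tuner, for example like this (the hyperparameter names are the ones defined in build above):
# Retrieve the best hyperparameter values found by the search.
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.get('num_layers'), best_hp.get('num_filters'), best_hp.get('units'))

# Retrieve the best model trained during the search.
best_model = tuner.get_best_models(num_models=1)[0]
best_model.summary()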
Update: 16/08
You commented that you want to optimize the following hyperparameters with Bayesian Optimization. I would approach the problem in the following way.
import tensorflow as tf
from kerastuner import HyperModel, Objective
from kerastuner.tuners import BayesianOptimization

class MyHyperModel(HyperModel):

    def build(self, hp):
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv2D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                         activation='relu',
                                         kernel_size=(3, 3),
                                         bias_initializer='glorot_uniform', input_shape=(32, 32, 3)))
        model.add(tf.keras.layers.MaxPooling2D())
        for i in range(hp.Int('num_layers', 1, 3)):
            # note: with num_layers=3 the 32x32 input shrinks below the 3x3 kernel;
            # padding='same' would avoid this
            model.add(tf.keras.layers.Conv2D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                             activation='relu',
                                             kernel_size=(3, 3),
                                             bias_initializer='glorot_uniform'))
            model.add(tf.keras.layers.MaxPooling2D())
        model.add(tf.keras.layers.Flatten())
        for i in range(hp.Int('num_layers_rnn', 1, 3)):
            model.add(tf.keras.layers.Dense(units=hp.Int('units', min_value=32, max_value=512, step=32), activation='relu'))
            model.add(tf.keras.layers.Dropout(rate=hp.Choice('dropout_rate', values=[0.2, 0.4, 0.5], default=0.2)))
        model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
        model.compile(
            optimizer=tf.keras.optimizers.Adam(
                hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
            loss='binary_crossentropy',
            metrics=['accuracy'])
        return model

class MyTuner(BayesianOptimization):
    def run_trial(self, trial, *args, **kwargs):
        # You can add additional HyperParameters for preprocessing and custom training loops
        # via overriding `run_trial`
        kwargs['batch_size'] = trial.hyperparameters.Int('batch_size', 32, 256, step=32)
        kwargs['epochs'] = trial.hyperparameters.Int('epochs', 10, 30)
        super(MyTuner, self).run_trial(trial, *args, **kwargs)

hypermodel = MyHyperModel()

tuner = MyTuner(
    hypermodel,
    objective=Objective('val_acc', direction="max"),
    num_initial_points=50,
    max_trials=15,
    directory='./',
    project_name='cnn_bayesian_opt')

tuner.search(train_dataset, validation_data=validation_dataset)
You can also have a look at the GitHub issue explaining how to tune epochs and batch_size here.
The code above will tune the following parameters, as you requested:
- number_of_convolutional_filter
- number_of_hidden_layer
- drop_rate
- learning_rate
- batch_size
- epochs
Posted on 2020-08-16 13:06:25
The Ax platform is a powerful tool for Bayesian optimization of deep neural networks. Below is how I approach it with Ax:
Building the CNN model
!pip install ax-platform

from tensorflow.keras import models
from ax.service.managed_loop import optimize
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

def build_model(opt, dropout, n_hidden=512):  # n_hidden: size of the fully connected layer
    model = models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(32, 32, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(n_hidden))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
Training the CNN model
The next step is to train the CNN model and return its accuracy, which is what the Bayesian optimization will use as its objective:
def train_evaluate(param):
    acc = 0
    mymodel = build_model(opt=param["opt"], dropout=param["dropout"])
    mymodel.fit(X_train, y_train, epochs=param["epochs"], batch_size=param["batch_size"],
                verbose=1, validation_data=(X_test, y_test))
    acc = mymodel.evaluate(X_test, y_test)[1]
    print(param, acc)
    del mymodel
    return acc
Running the Bayesian optimization
best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "opt", "type": "choice", "values": ['adam', 'rmsprop', 'sgd']},
        {"name": "dropout", "type": "choice", "values": [0.0, 0.25, 0.50, 0.75, 0.99]},
        {"name": "epochs", "type": "choice", "values": [10, 50, 100]},
        {"name": "batch_size", "type": "choice", "values": [32, 64, 100, 128]}
    ],
    evaluation_function=train_evaluate,
    objective_name="acc",
    total_trials=10,
)
Getting the best parameters
data = experiment.fetch_data()
df = data.df
best_arm_name = df.arm_name[df["mean"] == df["mean"].max()].values[0]
best_arm = experiment.arms_by_name[best_arm_name]

print(best_parameters)
print(best_arm)
Note that you can add other parameters to optimize, such as learning_rate or num_hidden_layer, in the same way I showed above; a sketch is given below. I hope this meets your needs. Let me know if you have further questions. Good luck!
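For instance, a continuous, log-scaled learning rate could be added as a range parameter. This is only a sketch: build_model and train_evaluate would need to accept and forward the extra "lr" value (e.g. pass tf.keras.optimizers.Adam(learning_rate=param["lr"]) as the optimizer):
# Sketch: search space with an added continuous, log-scaled learning rate.
best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "opt", "type": "choice", "values": ['adam', 'rmsprop', 'sgd']},
        {"name": "dropout", "type": "choice", "values": [0.0, 0.25, 0.50]},
        # "range" parameters are sampled continuously between the bounds;
        # log_scale=True searches the exponent rather than the raw value.
        {"name": "lr", "type": "range", "bounds": [1e-4, 1e-1], "log_scale": True},
        {"name": "epochs", "type": "choice", "values": [10, 50]},
        {"name": "batch_size", "type": "choice", "values": [32, 64, 128]},
    ],
    evaluation_function=train_evaluate,  # must read param["lr"] for this to take effect
    objective_name="acc",
    total_trials=10,
)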
https://stackoverflow.com/questions/63321271