首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >超参数调整(Hyperas)和流水线预处理的交叉验证

超参数调整(Hyperas)和流水线预处理的交叉验证
EN

Stack Overflow用户
提问于 2020-05-19 05:47:49
回答 1 · 查看 650 · 关注 0 · 票数 0

我尝试使用Hyperas优化和交叉验证我的超参数,但无法将预处理(缩放、过采样/欠采样)流水线与KerasClassifier结合使用

我使用

Hyperas

(hyperopt的包装器)来调优我的神经网络(使用Keras/Tensorflow构建)的超参数,并尝试实现kfold交叉验证以获得最佳参数。但是,我也会对数据进行预处理(StandardScaler和MinMaxScaler),然后使用SMOTETomek进行过采样/欠采样。

阅读

我了解到,不应该在整个数据集上进行特征缩放和重采样,而应该只在用于训练的部分上进行,以避免数据泄漏。由于hyperopt中的整个过程发生在一个函数里,仅对交叉验证的训练折叠实现这一点有些困难,因为当使用像这样的流水线时

,流水线只能与KerasClassifier一起使用,而它只接受一个模型构建函数。我无法提供这个模型函数,因为hyperopt中的整个验证过程都发生在一个函数中。

你对如何让这样的东西工作有什么建议吗?我可以在data()中执行所有的预处理吗?

并优化/交叉验证整个数据集上的参数,或者这是否会损害正确的参数查找过程?(我确实有一个用于最终模型的额外测试数据集)

有没有办法让它手动工作?

代码语言:javascript
复制
def data():
    """Load the Hyperas training features and labels.

    Hyperas executes this function inside its own generated script, so all
    imports it needs must live in the function body (module-level imports
    are not carried over).

    Returns:
        tuple: (df_hyper_X, df_hyper_Y) — pandas DataFrames read from the
        feather files in the working directory.
    """
    # NOTE: the original `import pandas as pd` was removed — it was never
    # used in this function; feather returns pandas DataFrames on its own.
    import feather

    df_hyper_X = feather.read_dataframe('df_hyper_X_train.feather')
    df_hyper_Y = feather.read_dataframe('df_hyper_Y_train.feather')

    return df_hyper_X, df_hyper_Y

def hyper_model(df_hyper_X,df_hyper_Y):
  """Hyperas objective: build one candidate network and score it by 10-fold CV.

  The double-brace expressions ({{choice(...)}}, {{uniform(...)}}) are Hyperas
  template markers, not plain Python; Hyperas rewrites this source, replacing
  each marker with a sampled hyperparameter value before execution.

  Args:
      df_hyper_X: training feature DataFrame (from data()).
      df_hyper_Y: training label DataFrame (from data()).

  Returns:
      dict with 'loss' (negative mean CV precision, minimized by hyperopt),
      'status', and 'model'.
  """

  # Scale two column groups separately; all remaining columns pass through
  # unchanged.
  stdscl_features = ['pre_grade', 'math']
  # NOTE(review): a bare string selects a 1-D column from ColumnTransformer;
  # MinMaxScaler presumably needs 2-D input — ['time'] may be intended
  # (the accepted answer uses a list). TODO confirm.
  normscl_features = 'time'
  stdscl_transformer = Pipeline(steps=[('stdscaler', StandardScaler())])
  normscl_transformer = Pipeline(steps=[('normscaler', MinMaxScaler())])

  preprocessor = ColumnTransformer(transformers=[('stdscl', stdscl_transformer, stdscl_features),('minmaxscl', normscl_transformer, normscl_features)], remainder='passthrough')

  # Confusion-matrix counters plus accuracy/precision/AUC, tracked per epoch.
  metrics = [
            tf.keras.metrics.TruePositives(name='tp'),
            tf.keras.metrics.FalsePositives(name='fp'),
            tf.keras.metrics.TrueNegatives(name='tn'),
            tf.keras.metrics.FalseNegatives(name='fn'), 
            tf.keras.metrics.BinaryAccuracy(name='accuracy'),
            tf.keras.metrics.Precision(name='precision'),
            tf.keras.metrics.AUC(name='auc'),
             ]

  # 1-2 hidden layers with tunable width, activation and initializer;
  # sigmoid output for binary classification.
  model = tf.keras.Sequential()
  model.add(Dense({{choice([2,4,8,16,32,64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}, kernel_initializer={{choice(['lecun_uniform','glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'])}}
                  , input_shape=(16,))) #If ReLu use --> HE uniform initialization #kernel_regularizer=tf.keras.regularizers.l2({{choice([0.01, 0.05, 0.1])}}
          #model.add(LeakyReLU(alpha={{uniform(0.5, 1)}}))
  model.add(Dropout({{uniform(0, 1)}}))      
  # Optionally add a second hidden layer (the 'one'/'two' choice itself is
  # a tuned hyperparameter).
  if ({{choice(['one', 'two'])}}) == 'two':
      model.add(Dense({{choice([2,4,8,16,32,64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}))
      model.add(Dropout({{uniform(0, 1)}}))

  #model.add(Dense({{choice([2,4,8,16,32,64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}})) third hidden layer
  #model.add(Dropout({{uniform(0, 1)}}))

  model.add(Dense(1, activation='sigmoid'))

  # Each optimizer gets its own tuned learning rate; all seven are
  # instantiated, then one is selected below.
  adam = tf.keras.optimizers.Adam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  nadam = tf.keras.optimizers.Nadam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adamax = tf.keras.optimizers.Adamax(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adagrad = tf.keras.optimizers.Adagrad(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adadelta = tf.keras.optimizers.Adadelta(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  sgd = tf.keras.optimizers.SGD(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  rmsprop = tf.keras.optimizers.RMSprop(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})


  opti_choice = {{choice(['adam', 'nadam', 'adamax','adagrad', 'adadelta', 'sgd','rmsprop'])}}
  if opti_choice == 'adam':
      optimizer = adam
  elif opti_choice == 'nadam':
      optimizer = nadam
  elif opti_choice == 'adamax':
      optimizer = adamax
  elif opti_choice == 'adagrad':
      optimizer = adagrad
  elif opti_choice == 'adadelta':
      optimizer = adadelta
  elif opti_choice == 'sgd':
      optimizer = sgd
  else:
      optimizer = rmsprop

  model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=metrics)

  kfold = KFold(n_splits=10, shuffle=True, random_state=3)

  # NOTE(review): KerasClassifier is handed a compiled model *instance* here,
  # but its API expects a build *function* — this mismatch is the failure the
  # surrounding question describes. TODO confirm against the wrapper's docs.
  imba_pipeline = make_pipeline(preprocessor, SMOTETomek(sampling_strategy='auto', random_state=2),
                                KerasClassifier(model, epochs={{choice([20,30,40,50,60,70])}}, batch_size={{choice([16,32, 64, 128])}}, verbose=0))
  results = cross_val_score(imba_pipeline, df_hyper_X, df_hyper_Y, cv=kfold, scoring='precision').mean()


  print('Precision', results)
  # hyperopt minimizes 'loss', so the mean precision is negated.
  # NOTE(review): cross_val_score clones the pipeline, so the returned
  # 'model' instance is presumably never fitted — verify before using it.
  return {'loss': -results, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    # Run the Hyperas/TPE search; notebook_name is required because this runs
    # inside a Colab notebook whose source Hyperas must re-parse.
    best_run, best_model = optim.minimize(model=hyper_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=30,
                                          trials=Trials(),
                                          notebook_name = 'drive/My Drive/Colab Notebooks/final_NL_EU_Non-EU')
    # BUG FIX: data() returns exactly two objects (features, labels); the
    # original 4-way unpack (X_train, Y_train, X_test, Y_test) raised
    # ValueError, and the nonexistent X_test/Y_test were then passed to
    # best_model.evaluate(). A separate hold-out set would be needed to
    # evaluate the best model here.
    df_hyper_X, df_hyper_Y = data()
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
EN

回答 1

Stack Overflow用户

回答已采纳

发布于 2020-05-20 05:25:30

解决了它。如果有人感兴趣,这就是解决方案:

代码语言:javascript
复制
def data():
    """Load the Hyperas training features and labels.

    Hyperas executes this function inside its own generated script, so all
    imports it needs must live in the function body (module-level imports
    are not carried over).

    Returns:
        tuple: (df_hyper_X, df_hyper_Y) — pandas DataFrames read from the
        feather files in the working directory.
    """
    # NOTE: the original `import pandas as pd` was removed — it was never
    # used in this function; feather returns pandas DataFrames on its own.
    import feather

    df_hyper_X = feather.read_dataframe('df_hyper_X_train.feather')
    df_hyper_Y = feather.read_dataframe('df_hyper_Y_train.feather')

    return df_hyper_X, df_hyper_Y

def hyper_model(df_hyper_X,df_hyper_Y):
  """Hyperas objective with a manual, leakage-free 10-fold CV loop.

  Scaling is fit only on each training fold and applied to the validation
  fold; SMOTETomek resampling is applied to the training fold only. The
  double-brace expressions ({{choice(...)}}, {{uniform(...)}}) are Hyperas
  template markers replaced with sampled values before execution.

  Args:
      df_hyper_X: training feature DataFrame (from data()).
      df_hyper_Y: training label DataFrame (from data()).

  Returns:
      dict with 'loss' (negative mean fold precision), 'status', and 'model'.
  """

  # Per-fold column scaling; remaining columns pass through unchanged.
  ct = ColumnTransformer([('ct_std', StandardScaler(), ['pre_grade', 'math']),('ct_minmax', MinMaxScaler(), ['time'])
  ], remainder='passthrough')

  metrics = [
            tf.keras.metrics.TruePositives(name='tp'),
            tf.keras.metrics.FalsePositives(name='fp'),
            tf.keras.metrics.TrueNegatives(name='tn'),
            tf.keras.metrics.FalseNegatives(name='fn'), 
            tf.keras.metrics.BinaryAccuracy(name='accuracy'),
            tf.keras.metrics.Precision(name='precision'),
            tf.keras.metrics.AUC(name='auc'),
             ]

  # 1-2 hidden layers with tunable width, activation and initializer;
  # sigmoid output for binary classification.
  model = tf.keras.Sequential()
  model.add(Dense({{choice([2,4,8,16,32,64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}, kernel_initializer={{choice(['lecun_uniform','glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'])}}
                  , input_shape=(20,)))
  model.add(Dropout({{uniform(0, 0.5)}}))

  # Optionally add a second hidden layer (the 'one'/'two' choice itself is
  # a tuned hyperparameter).
  if ({{choice(['one', 'two'])}}) == 'two':
      model.add(Dense({{choice([2,4,8,16,32,64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}))
      model.add(Dropout({{uniform(0, 0.5)}}))

  model.add(Dense(1, activation='sigmoid'))

  # Each optimizer gets its own tuned learning rate; one is selected below.
  adam = tf.keras.optimizers.Adam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  nadam = tf.keras.optimizers.Nadam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adamax = tf.keras.optimizers.Adamax(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adagrad = tf.keras.optimizers.Adagrad(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  adadelta = tf.keras.optimizers.Adadelta(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  sgd = tf.keras.optimizers.SGD(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
  rmsprop = tf.keras.optimizers.RMSprop(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})

  opti_choice = {{choice(['adam', 'nadam', 'adamax','adagrad', 'adadelta', 'sgd','rmsprop'])}}
  if opti_choice == 'adam':
      optimizer = adam
  elif opti_choice == 'nadam':
      optimizer = nadam
  elif opti_choice == 'adamax':
      optimizer = adamax
  elif opti_choice == 'adagrad':
      optimizer = adagrad
  elif opti_choice == 'adadelta':
      optimizer = adadelta
  elif opti_choice == 'sgd':
      optimizer = sgd
  else:
      optimizer = rmsprop

  model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=metrics)

  smt = SMOTETomek(sampling_strategy='auto', random_state=2)
  kfold = KFold(n_splits=10, shuffle=True, random_state=3)  
  scores = []

  # NOTE(review): the same `model` instance is fitted in every fold without
  # re-initializing its weights, so later folds start from already-trained
  # weights — this presumably inflates the CV precision. Rebuilding (or
  # cloning) the model per fold would give independent fold scores.
  for train_fold_index, val_fold_index in kfold.split(df_hyper_X,df_hyper_Y):

    X_train_fold, y_train_fold = df_hyper_X.iloc[train_fold_index], df_hyper_Y.iloc[train_fold_index]

    X_val_fold, y_val_fold = df_hyper_X.iloc[val_fold_index], df_hyper_Y.iloc[val_fold_index]

    # Fit scalers on the training fold only; transform the validation fold
    # with those fitted parameters (avoids leakage).
    X_train_fold = ct.fit_transform(X_train_fold)
    X_val_fold = ct.transform(X_val_fold)

    # Resample (over + under) the training fold only.
    X_train_smtk, y_train_smtk = smt.fit_resample(X_train_fold, y_train_fold)

    model.fit(X_train_smtk, y_train_smtk, epochs={{choice([20,30,40,50,60,70])}}, batch_size={{choice([16,32, 64, 128])}})

    # Threshold sigmoid outputs at 0.5 via round() and score precision on the
    # untouched validation fold.
    predicts = model.predict(X_val_fold)
    score = precision_score(y_val_fold, predicts.round())
    scores.append(score)

  avg_score = np.mean(scores)    
  print('Precision', avg_score)
  # hyperopt minimizes 'loss', so the mean precision is negated.
  return {'loss': -avg_score, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    # Configure the Hyperas/TPE search; notebook_name points Hyperas at the
    # Colab notebook source it must re-parse for the {{...}} template markers.
    search_settings = dict(
        model=hyper_model,
        data=data,
        algo=tpe.suggest,
        max_evals=2,
        trials=Trials(),
        notebook_name='drive/My Drive/Colab Notebooks/final_NL_EU_Non-EU',
    )
    best_run, best_model = optim.minimize(**search_settings)

    # Reload the data and report the winning hyperparameter assignment.
    df_hyper_X, df_hyper_Y = data()
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
票数 0
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/61879535

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档