Hyperparameter tuning (Hyperas) with k-fold cross-validation and a preprocessing pipeline
In short: I am trying to optimize and cross-validate my hyperparameters with Hyperas, but I cannot get a preprocessing pipeline (scaling, over-/undersampling) to work with KerasClassifier.
I use Hyperas (a wrapper around hyperopt for Keras) to tune the hyperparameters of my neural network (built with Keras/TensorFlow), and I am trying to add k-fold cross-validation to find the best parameters. I also preprocess the data (StandardScaler and MinMaxScaler) and then over-/undersample it with SMOTETomek.
I have learned that feature scaling and resampling should not be fit on the whole dataset but only on the training portion, to avoid data leakage. Implementing this on just the training part of each cross-validation split is tricky inside hyperopt: a pipeline such as imblearn's only works together with KerasClassifier, and KerasClassifier expects a model-building function, which I cannot provide because in hyperas the whole evaluation happens inside a single function.
Do you have any suggestions to make this work? Could I do all the preprocessing in def data() and optimize/cross-validate the parameters on the whole dataset, or would that distort the parameter search? (I do have a separate test dataset for the final model.)
Is there a way to implement this manually?
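To make the leakage rule concrete, here is a minimal sketch with a plain StandardScaler, where X_train_fold / X_val_fold stand in for one CV split:
from sklearn.preprocessing import StandardScaler

# Fit preprocessing statistics on the training fold only ...
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_fold)
# ... and reuse them, without refitting, on the validation fold
X_val_scaled = scaler.transform(X_val_fold)
My current attempt: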
import pandas as pd
import feather
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from imblearn.combine import SMOTETomek
from imblearn.pipeline import make_pipeline
from hyperas import optim
from hyperas.distributions import choice, uniform
from hyperopt import Trials, STATUS_OK, tpe

def data():
    df_hyper_X = feather.read_dataframe('df_hyper_X_train.feather')
    df_hyper_Y = feather.read_dataframe('df_hyper_Y_train.feather')
    return df_hyper_X, df_hyper_Y
def hyper_model(df_hyper_X, df_hyper_Y):
    stdscl_features = ['pre_grade', 'math']
    normscl_features = ['time']  # must be a list so the column stays 2-D for the scaler
    stdscl_transformer = Pipeline(steps=[('stdscaler', StandardScaler())])
    normscl_transformer = Pipeline(steps=[('normscaler', MinMaxScaler())])
    preprocessor = ColumnTransformer(
        transformers=[('stdscl', stdscl_transformer, stdscl_features),
                      ('minmaxscl', normscl_transformer, normscl_features)],
        remainder='passthrough')
    metrics = [
        tf.keras.metrics.TruePositives(name='tp'),
        tf.keras.metrics.FalsePositives(name='fp'),
        tf.keras.metrics.TrueNegatives(name='tn'),
        tf.keras.metrics.FalseNegatives(name='fn'),
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.AUC(name='auc'),
    ]
    model = tf.keras.Sequential()
    model.add(Dense({{choice([2, 4, 8, 16, 32, 64])}},
                    activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}},
                    kernel_initializer={{choice(['lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'])}},
                    input_shape=(16,)))  # if ReLU, use He uniform initialization; optional: kernel_regularizer=tf.keras.regularizers.l2({{choice([0.01, 0.05, 0.1])}})
    # model.add(LeakyReLU(alpha={{uniform(0.5, 1)}}))
    model.add(Dropout({{uniform(0, 1)}}))
    if {{choice(['one', 'two'])}} == 'two':
        model.add(Dense({{choice([2, 4, 8, 16, 32, 64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}))
        model.add(Dropout({{uniform(0, 1)}}))
    # third hidden layer:
    # model.add(Dense({{choice([2, 4, 8, 16, 32, 64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}))
    # model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1, activation='sigmoid'))
    adam = tf.keras.optimizers.Adam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    nadam = tf.keras.optimizers.Nadam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adamax = tf.keras.optimizers.Adamax(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adagrad = tf.keras.optimizers.Adagrad(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adadelta = tf.keras.optimizers.Adadelta(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    sgd = tf.keras.optimizers.SGD(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    rmsprop = tf.keras.optimizers.RMSprop(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    opti_choice = {{choice(['adam', 'nadam', 'adamax', 'adagrad', 'adadelta', 'sgd', 'rmsprop'])}}
    if opti_choice == 'adam':
        optimizer = adam
    elif opti_choice == 'nadam':
        optimizer = nadam
    elif opti_choice == 'adamax':
        optimizer = adamax
    elif opti_choice == 'adagrad':
        optimizer = adagrad
    elif opti_choice == 'adadelta':
        optimizer = adadelta
    elif opti_choice == 'sgd':
        optimizer = sgd
    else:
        optimizer = rmsprop
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=metrics)
    kfold = KFold(n_splits=10, shuffle=True, random_state=3)
    # This is where it breaks: KerasClassifier expects a model-building
    # function (build_fn), not an already-built model.
    imba_pipeline = make_pipeline(preprocessor,
                                  SMOTETomek(sampling_strategy='auto', random_state=2),
                                  KerasClassifier(model, epochs={{choice([20, 30, 40, 50, 60, 70])}},
                                                  batch_size={{choice([16, 32, 64, 128])}}, verbose=0))
    results = cross_val_score(imba_pipeline, df_hyper_X, df_hyper_Y, cv=kfold, scoring='precision').mean()
    print('Precision', results)
    return {'loss': -results, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=hyper_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=30,
                                          trials=Trials(),
                                          notebook_name='drive/My Drive/Colab Notebooks/final_NL_EU_Non-EU')
    df_hyper_X, df_hyper_Y = data()  # data() returns only the training frames; the held-out test set is loaded separately
    print("Evaluation of best performing model:")
    print(best_model.evaluate(df_hyper_X, df_hyper_Y))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
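For reference, the pattern the pipeline approach assumes outside hyperas looks like the following. This is a minimal sketch; build_model and its fixed hyperparameter values are illustrative, not the tuned ones above. It also shows the conflict: the {{...}} templates must live inside the single hyperas objective function, so there is no standalone build function to hand to KerasClassifier.
def build_model():
    # Fixed, illustrative hyperparameters; with hyperas these could not live
    # here, because the {{...}} templates are only expanded inside hyper_model.
    model = tf.keras.Sequential([
        Dense(32, activation='relu', input_shape=(16,)),
        Dropout(0.2),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model

pipe = make_pipeline(preprocessor,  # the ColumnTransformer from hyper_model above
                     SMOTETomek(sampling_strategy='auto', random_state=2),
                     KerasClassifier(build_fn=build_model, epochs=30, batch_size=32, verbose=0))
scores = cross_val_score(pipe, df_hyper_X, df_hyper_Y,
                         cv=KFold(n_splits=10, shuffle=True, random_state=3),
                         scoring='precision')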
1 Answer
Solved it. In case anyone is interested, here is the solution:
# Imports as in the question, plus:
import numpy as np
from sklearn.metrics import precision_score

def data():
    df_hyper_X = feather.read_dataframe('df_hyper_X_train.feather')
    df_hyper_Y = feather.read_dataframe('df_hyper_Y_train.feather')
    return df_hyper_X, df_hyper_Y
def hyper_model(df_hyper_X, df_hyper_Y):
    ct = ColumnTransformer([('ct_std', StandardScaler(), ['pre_grade', 'math']),
                            ('ct_minmax', MinMaxScaler(), ['time'])],
                           remainder='passthrough')
    metrics = [
        tf.keras.metrics.TruePositives(name='tp'),
        tf.keras.metrics.FalsePositives(name='fp'),
        tf.keras.metrics.TrueNegatives(name='tn'),
        tf.keras.metrics.FalseNegatives(name='fn'),
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.AUC(name='auc'),
    ]
    model = tf.keras.Sequential()
    model.add(Dense({{choice([2, 4, 8, 16, 32, 64])}},
                    activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}},
                    kernel_initializer={{choice(['lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'])}},
                    input_shape=(20,)))
    model.add(Dropout({{uniform(0, 0.5)}}))
    if {{choice(['one', 'two'])}} == 'two':
        model.add(Dense({{choice([2, 4, 8, 16, 32, 64])}}, activation={{choice(['relu', 'sigmoid', 'tanh', 'elu', 'selu'])}}))
        model.add(Dropout({{uniform(0, 0.5)}}))
    model.add(Dense(1, activation='sigmoid'))
    adam = tf.keras.optimizers.Adam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    nadam = tf.keras.optimizers.Nadam(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adamax = tf.keras.optimizers.Adamax(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adagrad = tf.keras.optimizers.Adagrad(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    adadelta = tf.keras.optimizers.Adadelta(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    sgd = tf.keras.optimizers.SGD(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    rmsprop = tf.keras.optimizers.RMSprop(lr={{choice([0.0001, 0.001, 0.01, 0.1])}})
    opti_choice = {{choice(['adam', 'nadam', 'adamax', 'adagrad', 'adadelta', 'sgd', 'rmsprop'])}}
    if opti_choice == 'adam':
        optimizer = adam
    elif opti_choice == 'nadam':
        optimizer = nadam
    elif opti_choice == 'adamax':
        optimizer = adamax
    elif opti_choice == 'adagrad':
        optimizer = adagrad
    elif opti_choice == 'adadelta':
        optimizer = adadelta
    elif opti_choice == 'sgd':
        optimizer = sgd
    else:
        optimizer = rmsprop
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=metrics)
    smt = SMOTETomek(sampling_strategy='auto', random_state=2)
    kfold = KFold(n_splits=10, shuffle=True, random_state=3)
    scores = []
    for train_fold_index, val_fold_index in kfold.split(df_hyper_X, df_hyper_Y):
        X_train_fold, y_train_fold = df_hyper_X.iloc[train_fold_index], df_hyper_Y.iloc[train_fold_index]
        X_val_fold, y_val_fold = df_hyper_X.iloc[val_fold_index], df_hyper_Y.iloc[val_fold_index]
        # Fit the scalers on the training fold only, then apply them to the validation fold
        X_train_fold = ct.fit_transform(X_train_fold)
        X_val_fold = ct.transform(X_val_fold)
        # Resample only the (scaled) training fold
        X_train_smtk, y_train_smtk = smt.fit_resample(X_train_fold, y_train_fold)
        # Note: the same model instance is reused across folds (see the caveat below)
        model.fit(X_train_smtk, y_train_smtk, epochs={{choice([20, 30, 40, 50, 60, 70])}}, batch_size={{choice([16, 32, 64, 128])}})
        predicts = model.predict(X_val_fold)
        score = precision_score(y_val_fold, predicts.round())
        scores.append(score)
    avg_score = np.mean(scores)
    print('Precision', avg_score)
    return {'loss': -avg_score, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=hyper_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=2,
                                          trials=Trials(),
                                          notebook_name='drive/My Drive/Colab Notebooks/final_NL_EU_Non-EU')
    df_hyper_X, df_hyper_Y = data()
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
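One caveat with the loop above: the same compiled model instance is fit in every fold, so weights learned in one fold carry over into the next. A variant that rebuilds the model per fold keeps the folds independent. Here is a sketch, where build_fold_model is a hypothetical zero-argument helper that constructs and compiles the network (inside hyperas, the {{...}} templates would still have to live in hyper_model's body):
def cross_val_precision(df_hyper_X, df_hyper_Y, build_fold_model, ct, smt, kfold, epochs, batch_size):
    scores = []
    for train_idx, val_idx in kfold.split(df_hyper_X, df_hyper_Y):
        # Scalers and resampler fit on the training fold only
        X_tr = ct.fit_transform(df_hyper_X.iloc[train_idx])
        X_val = ct.transform(df_hyper_X.iloc[val_idx])
        X_res, y_res = smt.fit_resample(X_tr, df_hyper_Y.iloc[train_idx])
        model = build_fold_model()  # fresh, freshly initialized model per fold
        model.fit(X_res, y_res, epochs=epochs, batch_size=batch_size, verbose=0)
        preds = model.predict(X_val).round()
        scores.append(precision_score(df_hyper_Y.iloc[val_idx], preds))
    return np.mean(scores)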