在mac和ubuntu上运行相同代码的python tensorflow抛出

2024-04-19 04:44:12 发布

您现在位置:Python中文网/ 问答频道 /正文

我在mac和ubuntu上运行一个完全相同的神经网络模型。神经网络在一个循环中运行3次,并输出预测和精度。代码如下:

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
import pandas as pd
import matplotlib
from matplotlib import style
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.layers.normalization import BatchNormalization
from keras.optimizers import RMSprop
import numpy as np
from functools import reduce
import tensorflow as tf
from keras import backend as K
import os

#%matplotlib inline
matplotlib.style.use('ggplot')

def neural_net(set_val, epoch_val, batch_val, label_val):
    sess = tf.Session()
    K.set_session(sess)
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    def neural_net_1(label1, pred_1):

        data_num_train = pd.read_csv(str(set_val) + 'final_train_set_all_2.csv')
        data_num_test = pd.read_csv(str(set_val) + 'final_test_set_all_2.csv')

        scaler = MinMaxScaler(feature_range=(0, 1))

        # Train Data
        features_train = data_num_train.drop([label1], axis=1, errors='ignore')
        features_train = pd.DataFrame(scaler.fit_transform(features_train))
        scale_num_data_train = pd.concat([data_num_train[label1], features_train], axis=1)
        dtrain_train, dtest_train = train_test_split(scale_num_data_train, test_size=0.01, random_state=575)
        X_train = dtrain_train.drop([label1], axis=1, errors='ignore')
        y_train = dtrain_train[label1]

        # Test Data
        features_test = data_num_test.drop([label1], axis=1, errors='ignore')
        features_test = pd.DataFrame(scaler.fit_transform(features_test))
        scale_num_data_test = pd.concat([data_num_test[label1], features_test], axis=1)
        dtrain_test, dtest_test = train_test_split(scale_num_data_test, test_size=0.99, random_state=575)
        X_test = dtest_test.drop([label1], axis=1, errors='ignore')
        y_test = dtest_test[label1]

        # initiate model
        model = Sequential()

        # Input Layer 1
        model.add(Dense(256, input_dim=1970, init='uniform'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dropout(0.2))

        # Hidden Layer 2
        model.add(Dense(256, init='uniform'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dropout(0.2))

        # Hidden Layer 3
        model.add(Dense(256, init='uniform'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dropout(0.2))

        # Hidden Layer 4
        model.add(Dense(256, init='uniform'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))

        # Output Layer 5
        model.add(Dense(10, init='uniform'))
        model.add(BatchNormalization())
        model.add(Activation('softmax'))

        model.summary()
        rms_prop = RMSprop(lr=0.0001, rho=0.9, epsilon=None, decay=0.0)
        model.compile(optimizer=rms_prop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        model.fit(X_train, y_train, epochs=epoch_val, batch_size=batch_val, shuffle=True)

        scores = model.evaluate(X_test, y_test, batch_size=batch_val)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

        acc_random, accuracy_dict = ("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100)).split(" ", 1)
        accuracy_dict = accuracy_dict.replace('%', '')
        accuracy_dict = {str(pred_1): accuracy_dict}
        accuracy_df = pd.DataFrame(data=accuracy_dict, index=[0])
        accuracy_df.to_csv('mid_accuracy' + str(set_val) + '_' + label1 + '_' + str(pred_1) + '.csv')

        pred = model.predict_classes(X_test)
        print(pred)
        print(type(pred))

        # output: <class 'numpy.ndarray'>

        Xtest = scaler.inverse_transform(X_test)
        Xtest = pd.DataFrame(data=Xtest[0:, 0:], columns=Xtest[0, 0:])
        print(Xtest.head(n=20))
        print(type(Xtest))

        # output: [20 rows x 1970 columns]
        # output: <class 'pandas.core.frame.DataFrame'>

        Xtest['prediction' + str(pred_1) + str(label1)] = pred

        col_vals = pd.read_csv(str(set_val) + 'final_test_set_all_2.csv')
        col_vals = col_vals.columns.get_values()
        col_val_list = col_vals.tolist()
        col_val_list.extend(['prediction' + '_' + str(set_val) + '_' + label1 + '_' + str(epoch_val) + '_' + str(batch_val) + '_' + str(pred_1)])
        col_val_list.remove(label1)

        Xtest.columns = col_val_list
        Xtest.to_csv('mid_prediction' + '_' + str(set_val) + '_' + label1 + '_' + str(epoch_val) + '_' + str(batch_val) + '_' + str(pred_1) + '.csv')
        return None

    prediction_list2 = [1, 2, 3]
    for prediction2 in prediction_list2:
        neural_net_1(label_val, prediction2)

    sess.close()


neural_net(set_val=21, epoch_val=5,
           batch_val=24, label_val='label1')

但是,当我在ubuntu上运行此代码时,出现以下错误:

TypeError: Cannot convert bool to numpy.ndarray

在此行中:

Xtest['prediction' + str(pred_1) + str(label1)] = pred

而且,在这两种情况下

pred变量为:

<class 'numpy.ndarray'>

Xtest变量是:

<class 'pandas.core.frame.DataFrame'>

mac运行的是Python3.6和TensorFlow1.6-cpu。你知道吗

ubuntu运行的是Python3.5和TensorFlow1.5-gpu

如果有人能帮助我理解为什么错误发生在一个实例而不是另一个实例中,以及我如何可能修复它,我将不胜感激。非常感谢。你知道吗


Tags: csvtestimportadddatamodeltrainval
1条回答
网友
1楼 · 发布于 2024-04-19 04:44:12

要将列添加到数据帧,应该执行以下操作

col_name = 'prediction' + str(pred_1) + str(label1)
Xtest = Xtest.assign(col_name=pred)

相关问题 更多 >