试图pickle ML模型无法pickle google colab中的_thread.RLock对象

2024-04-25 15:15:59 发布

您现在位置:Python中文网/ 问答频道 /正文

我正在 Google Colab 中使用 CNN 训练 MNIST 数据集,并希望使用 pickle 保存模型。当我尝试保存模型时,出现错误:TypeError: can't pickle _thread.RLock objects

我的代码

import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split


# --- Hyperparameters ---
testRatio = 0.2
valRatio = 0.2          # fraction of the training set held out for validation
imageDimensions = (28,28,3)

batchSizeVal = 50
EPOCHS = 2
stepsPerEpoch = 2000




# Load MNIST and carve a validation split out of the 60k training images.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_validation , y_train , y_validation = train_test_split(X_train, y_train, test_size= valRatio)


# Add the single grayscale channel axis. Fix: use -1 for the sample count
# instead of hard-coding 48000/10000/12000 — those numbers are only correct
# while valRatio == 0.2, so the original code broke if the ratio changed.
X_train = X_train.reshape((-1, 28, 28, 1))
X_test = X_test.reshape((-1, 28, 28, 1))
X_validation = X_validation.reshape((-1, 28, 28, 1))




# Light on-the-fly augmentation applied to training batches.
dataGen = ImageDataGenerator(width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   zoom_range = 0.2,
                                   shear_range = 0.1,
                                   rotation_range= 10)


dataGen.fit(X_train)

# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train,10)
y_test= to_categorical(y_test,10)
y_validation = to_categorical(y_validation,10)


def myModel():
    """Build and compile the MNIST CNN.

    Architecture: two 5x5 conv layers (60 filters), max-pool, two 3x3 conv
    layers (30 filters), max-pool, dropout, then a 500-unit dense layer and
    a 10-way softmax output.

    Returns:
        A compiled keras Sequential model (Adam, categorical crossentropy).
    """
    noOfFiters = 60
    sizeOfFilter1 = (5,5)
    sizeOfFilter2 = (3,3)
    sizeOfPool = (2,2)
    noOfNode = 500

    model = Sequential()
    # Input is a single-channel 28x28 image: imageDimensions declares 3
    # channels but the data above is reshaped to 1 channel, so only the
    # height/width components are used here.
    model.add(Conv2D(noOfFiters, sizeOfFilter1,
                     input_shape=(imageDimensions[0], imageDimensions[1], 1),
                     activation="relu"))
    model.add(Conv2D(noOfFiters, sizeOfFilter1, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Conv2D(noOfFiters//2, sizeOfFilter2, activation="relu"))
    model.add(Conv2D(noOfFiters//2, sizeOfFilter2, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(noOfNode, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Fix: the `lr` argument was renamed `learning_rate`; `lr` is deprecated
    # and removed in current Keras releases.
    model.compile(Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model

model = myModel()




# Train on augmented batches; the held-out split is used for validation.
history = model.fit(dataGen.flow(X_train, y_train,
                                 batch_size= batchSizeVal),
                                 steps_per_epoch = stepsPerEpoch,
                                 epochs =EPOCHS,
                                 validation_data = (X_validation,y_validation),
                                 shuffle= True)

# Training vs. validation loss curves.
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training ', 'validation'])
plt.title("Loss")
plt.xlabel('epoch')

# Training vs. validation accuracy curves.
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training ', 'validation'])
plt.title("Accuracy")
plt.xlabel('epoch')
plt.show()

# Final evaluation on the untouched test set.
score = model.evaluate(X_test,y_test,verbose=0)
print("Test Score = ",score[0])
print("Test Accuracy = ",score[1])


# NOTE(review): a plain Keras model is not picklable (its layers hold
# _thread.RLock objects — exactly the reported error); model.save(...) is
# the supported way to persist it, or patch Model.__getstate__/__setstate__
# first (see the accepted answer).
# Fixes applied here: pickle.dump() returns None, so the original
# `model = pickle.dump(model, pickle_out)` clobbered `model`; a `with`
# block also guarantees the file handle is closed.
with open("model_trained.pickle", "wb") as pickle_out:
    pickle.dump(model, pickle_out)

我该怎么做才能让它工作呢。 我试图将运行时更改为cpu,因为我认为这是gpu造成的,但即使如此,它也无法工作


Tags: from, test, import, add, model, range, train, plt
1条回答
网友
1楼 · 发布于 2024-04-25 15:15:59

Keras 不支持用 pickle 序列化其对象(模型)。基本上,如果一个对象实现了 `__getstate__` 和 `__setstate__` 方法,pickle 就会使用它们来序列化该对象。问题在于 Keras 模型并没有实现这两个方法。

@Zach Moshe 提出了解决此问题的修补程序(hotfix)。有关更多详细信息,请参阅他的博客。

# Hotfix: make keras models round-trip through pickle via HDF5 bytes.
def make_keras_picklable():
    """Monkey-patch Model so pickle serializes it as saved-model bytes."""

    def __getstate__(self):
        # Save the model to a temp HDF5 file, then capture its raw bytes.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            save_model(self, tmp.name, overwrite=True)
            raw = tmp.read()
        return {'model_str': raw}

    def __setstate__(self, state):
        # Write the stored bytes back to disk and let Keras rebuild the
        # model, then adopt its attributes wholesale.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            tmp.write(state['model_str'])
            tmp.flush()
            restored = load_model(tmp.name)
        self.__dict__ = restored.__dict__

    Model.__getstate__ = __getstate__
    Model.__setstate__ = __setstate__

# Apply the patch.
make_keras_picklable()

请参考下面的工作代码

import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model, save_model, Model
from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import tempfile


# --- Hyperparameters ---
testRatio = 0.2
valRatio = 0.2          # fraction of the training set held out for validation
imageDimensions = (28,28,3)

batchSizeVal = 50
EPOCHS = 2
stepsPerEpoch = 2000


# Load MNIST and carve a validation split out of the 60k training images.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_validation , y_train , y_validation = train_test_split(X_train, y_train, test_size= valRatio)


# Add the single grayscale channel axis. Fix: use -1 for the sample count
# instead of hard-coding 48000/10000/12000 — those numbers are only correct
# while valRatio == 0.2.
X_train = X_train.reshape((-1, 28, 28, 1))
X_test = X_test.reshape((-1, 28, 28, 1))
X_validation = X_validation.reshape((-1, 28, 28, 1))

 # Hotfix function
def make_keras_picklable():
    """Monkey-patch keras Model so pickle can (de)serialize it.

    pickle uses __getstate__/__setstate__ when present; keras models do
    not define them, hence the "can't pickle _thread.RLock objects" error.
    The patch serializes the model as HDF5 bytes instead.
    """
    def __getstate__(self):
        # Save the model to a temporary HDF5 file and return its raw
        # bytes as the picklable state.
        model_str = ""
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            save_model(self, fd.name, overwrite=True)
            model_str = fd.read()
        d = {'model_str': model_str}
        return d

    def __setstate__(self, state):
        # Write the stored bytes back to disk, rebuild the model with
        # load_model, and adopt its attributes.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            fd.write(state['model_str'])
            fd.flush()
            model = load_model(fd.name)
        self.__dict__ = model.__dict__


    # Install the hooks on the Model base class so every model is affected.
    cls = Model
    cls.__getstate__ = __getstate__
    cls.__setstate__ = __setstate__

# Run the function
make_keras_picklable()


# Light on-the-fly augmentation applied to training batches.
dataGen = ImageDataGenerator(width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   zoom_range = 0.2,
                                   shear_range = 0.1,
                                   rotation_range= 10)


# Compute any data-dependent statistics the generator needs before flow().
dataGen.fit(X_train)

# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train,10)
y_test= to_categorical(y_test,10)
y_validation = to_categorical(y_validation,10)


def myModel():
    """Build and compile the MNIST CNN.

    Architecture: two 5x5 conv layers (60 filters), max-pool, two 3x3 conv
    layers (30 filters), max-pool, dropout, then a 500-unit dense layer and
    a 10-way softmax output.

    Returns:
        A compiled Sequential model (Adam, categorical crossentropy).
    """
    noOfFiters = 60
    sizeOfFilter1 = (5,5)
    sizeOfFilter2 = (3,3)
    sizeOfPool = (2,2)
    noOfNode = 500

    model = Sequential()
    # Input is a single-channel 28x28 image: imageDimensions declares 3
    # channels but the data above is reshaped to 1 channel, so only the
    # height/width components are used here.
    model.add(Conv2D(noOfFiters, sizeOfFilter1,
                     input_shape=(imageDimensions[0], imageDimensions[1], 1),
                     activation="relu"))
    model.add(Conv2D(noOfFiters, sizeOfFilter1, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Conv2D(noOfFiters//2, sizeOfFilter2, activation="relu"))
    model.add(Conv2D(noOfFiters//2, sizeOfFilter2, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(noOfNode, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Fix: the `lr` argument was renamed `learning_rate`; `lr` is deprecated
    # and removed in current Keras releases.
    model.compile(Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model

model = myModel()


# Train on augmented batches; steps_per_epoch is derived from the data
# size so each epoch covers the training set once (48000 // 50 = 960
# steps, matching the logged output below).
history = model.fit(dataGen.flow(X_train, y_train,
                                 batch_size= batchSizeVal),
                                 steps_per_epoch = X_train.shape[0]//batchSizeVal,
                                 epochs =EPOCHS,
                                 validation_data = (X_validation,y_validation),
                                 shuffle= True)

# Final evaluation on the untouched test set.
score = model.evaluate(X_test,y_test,verbose=0)
print("Test Score = ",score[0])
print("Test Accuracy = ",score[1])

# This now succeeds because make_keras_picklable() patched Model with
# __getstate__/__setstate__ above.
with open('model.pkl', 'wb') as f:
    pickle.dump(model, f)

输出:

Epoch 1/2
960/960 [==============================] - 338s 352ms/step - loss: 1.0066 - accuracy: 0.6827 - val_loss: 0.1417 - val_accuracy: 0.9536
Epoch 2/2
960/960 [==============================] - 338s 352ms/step - loss: 0.3542 - accuracy: 0.8905 - val_loss: 0.0935 - val_accuracy: 0.9719


Test Score =  0.07476004958152771
Test Accuracy =  0.9761999845504761

相关问题 更多 >