ValueError: You are trying to load a weight file containing 23 layers into a model with 26 layers

Posted 2024-05-15 05:53:13


This is a common problem, and I have not been able to solve it myself. It is still an open issue on GitHub:

https://github.com/keras-team/keras/issues/10417

One suggested fix is to change the weight file format from HDF5 to the TensorFlow checkpoint format, i.e. change weights_path += '/unet_weights.hdf5' to a .tf path.

That idea (changing the file format) works on Colab, but my model and data are too large: even 38 GB of RAM is not enough, and the session crashes.
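For reference, this is roughly what the format change amounts to (a minimal sketch assuming the standard Keras save_weights/load_weights API; the path is a placeholder, not from my actual code):

# Save/load weights in the TensorFlow checkpoint format instead of HDF5.
# The 'tf' format stores weights keyed by the layer object graph rather than
# by the flat layer order that HDF5 uses, which is the check that fails
# with the "23 layers into a model with 26 layers" error.
weights_path = 'weights/unet_weights.tf'  # placeholder path
model.save_weights(weights_path, save_format='tf')
model.load_weights(weights_path)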

I upgraded and downgraded my tensorflow/keras versions, as suggested in several answers, but that did not help.

How can I fix my model?

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K


def unet_model(n_classes=5, im_sz=160, n_channels=8, n_filters_start=32, growth_factor=2, upconv=True,
               class_weights=[0.2, 0.3, 0.1, 0.1, 0.3]):
    droprate = 0.50
    n_filters = n_filters_start
    inputs = Input((im_sz, im_sz, n_channels))
    # inputs = BatchNormalization()(inputs)

    # Encoder block 1
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = Dropout(droprate)(pool1)

    # Encoder block 2
    n_filters *= growth_factor
    pool1 = BatchNormalization()(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = Dropout(droprate)(pool2)

    # Encoder block 3
    n_filters *= growth_factor
    pool2 = BatchNormalization()(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = Dropout(droprate)(pool3)

    # Encoder block 4
    n_filters *= growth_factor
    pool3 = BatchNormalization()(pool3)
    conv4 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    pool4 = Dropout(droprate)(pool4)

    # Bottleneck
    n_filters *= growth_factor
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)

    # Decoder block 1: upsample and concatenate with conv4
    n_filters //= growth_factor
    if upconv:
        up6 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv5), conv4])
    else:
        up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    # up6 = BatchNormalization()(up6)
    up6 = Dropout(droprate)(up6)
    conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv6)

    # Decoder block 2: upsample and concatenate with conv3
    n_filters //= growth_factor
    if upconv:
        up7 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv6), conv3])
    else:
        up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    # up7 = BatchNormalization()(up7)
    up7 = Dropout(droprate)(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)

    # Decoder block 3: upsample and concatenate with conv2
    n_filters //= growth_factor
    if upconv:
        up8 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv7), conv2])
    else:
        up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    # up8 = BatchNormalization()(up8)
    up8 = Dropout(droprate)(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)

    # Decoder block 4: upsample and concatenate with conv1
    n_filters //= growth_factor
    if upconv:
        up9 = concatenate([Conv2DTranspose(n_filters, (2, 2), strides=(2, 2), padding='same')(conv8), conv1])
    else:
        up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    up9 = Dropout(droprate)(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)

    # Output layer: one sigmoid probability map per class
    conv10 = Conv2D(n_classes, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    # Per-class binary cross-entropy, weighted by class_weights
    def weighted_binary_crossentropy(y_true, y_pred):
        class_loglosses = K.mean(K.binary_crossentropy(y_true, y_pred), axis=[0, 1, 2])
        return K.sum(class_loglosses * K.constant(class_weights))

    model.compile(optimizer=Adam(), loss=weighted_binary_crossentropy)
    return model


if __name__ == '__main__':
    model = unet_model()
    model.summary()  # summary() prints the architecture itself and returns None
    plot_model(model, to_file='unet_model.png', show_shapes=True)

But I get the common error:

ValueError: You are trying to load a weight file containing 23 layers into a model with 26 layers
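For completeness, this is how the mismatch can be inspected, along with the by-name loading workaround that is often suggested for this error (a sketch under my assumptions: the weight file is HDF5, h5py is installed, and 'unet_weights.hdf5' is a placeholder path):

import h5py

# Compare the number of layers recorded in the HDF5 file with the model.
with h5py.File('unet_weights.hdf5', 'r') as f:  # placeholder path
    print('layers in weight file:', len(f.attrs['layer_names']))
print('layers in model:', len(model.layers))

# Commonly suggested workaround: match weights by layer name and skip any
# layers whose names or shapes do not line up, instead of loading by order.
model.load_weights('unet_weights.hdf5', by_name=True, skip_mismatch=True)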

Tags: size, model, activation, filters, dropout, keras, relu, same
