ValueError: Error when checking target: expected dense_2 to have 2 dimensions, but got array with shape (1,)

Posted 2024-04-18 23:54:04


I'm putting together a machine-learning model to recognize whether a given image contains a feature (1) or not (0). However, the shape of the labels and the shape of the model's output seem to differ.

All of the information is held in a TensorFlow dataset:

path_ds = tf.data.Dataset.from_tensor_slices(allImages)
img_ds = path_ds.map(preprocess_image)
label_ds = tf.data.Dataset.from_tensor_slices(labels)
label_ds = label_ds.map(conversion)
ds = tf.data.Dataset.zip((img_ds, label_ds))

ds = ds.shuffle(buffer_size=image_count).repeat().batch(5).prefetch(10)

iterator = ds.make_one_shot_iterator()
ds_x, ds_y = iterator.get_next()
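
To show the shapes this pipeline produces, here is a minimal sketch of the same steps with dummy arrays standing in for my real images and labels (the data below is made up; I'm assuming TensorFlow 1.x, which matches the make_one_shot_iterator call):

import numpy as np
import tensorflow as tf

# Dummy stand-ins for the decoded images and the 0/1 labels (not real data)
dummy_images = np.zeros((8, 704, 480, 3), dtype=np.float32)
dummy_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0], dtype=np.float32)

ds = tf.data.Dataset.zip((
    tf.data.Dataset.from_tensor_slices(dummy_images),
    tf.data.Dataset.from_tensor_slices(dummy_labels),
))
ds = ds.shuffle(buffer_size=8).repeat().batch(5).prefetch(10)

print(ds.output_shapes)  # the batched labels come out 1-D: shape (?,)
ds_x, ds_y = ds.make_one_shot_iterator().get_next()
print(ds_x.shape, ds_y.shape)  # roughly (?, 704, 480, 3) and (?,)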

The model looks like this:

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(704, 480, 3)))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3,3), activation='relu'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(ds_x, ds_y, epochs=3, steps_per_epoch=1, verbose=3)

allImages and labels are both lists: allImages holds the paths to the images, which are preprocessed later, and labels is a list of 1s and 0s.
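
For illustration, the two lists look roughly like this (the file names below are made up):

# Hypothetical contents, for illustration only
allImages = ['/home/mitiga/Images/CAM1/image_0001.jpg',
             '/home/mitiga/Images/CAM1/image_0002.jpg']
labels = [1, 0]  # 1 = feature present, 0 = feature absent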

The error message I receive is the following:

ValueError: Error when checking target: expected dense_2 to have 2 dimensions, but got array with shape (1,)
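
As far as I can tell, the mismatch is between the 2-D (batch, 1) output of the final Dense(1) layer (presumably what dense_2 refers to) and my labels, which arrive as plain scalars. A small sketch of that difference with random data (not my real labels):

import numpy as np

label_batch = np.array([1., 0., 1., 0., 1.], dtype=np.float32)  # what one batch of my labels looks like
print(label_batch.shape)                      # (5,)  -> one dimension
print(np.expand_dims(label_batch, -1).shape)  # (5, 1) -> two dimensions, like the Dense(1) output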

Full code:

import tensorflow as tf
import os
import glob
import csv
import lab
from PIL import ImageFile
import datetime
import pandas as pd


def preprocess_image(path):
    img = tf.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3, try_recover_truncated=True, acceptable_fraction=0.9)
    img = tf.cast(img, tf.float32)
    return img


def conversion(label):
    label = tf.cast(label, tf.float32)
    return label


def get_keys(d):
    if not d.keys():
        return None
    if list(d.keys())[0] == 'yes':
        return 1
    if list(d.keys())[0] == 'no':
        return 0

def change_range(image,label):
  return 2*image-1, label


print("Start", datetime.datetime.now())

orig_path = 'PATH'
CAM = 'CAM'

allImages = []
labels = []

print("Parse labels", datetime.datetime.now())
for folders in os.listdir(orig_path):
    print("Reading one folder")
    df = pd.read_csv(orig_path + '/' + folders + '/' + folders + '.csv')
    df.drop(['file_size', 'region_count', 'region_id', 'region_shape_attributes', 'region_attributes'], axis=1, inplace=True)
    df.file_attributes = df.file_attributes.str.replace('true', 'True')
    labelsDF = pd.DataFrame(df.file_attributes.apply(eval).values.tolist())
    mask = labelsDF.time == 'day'
    labels2 = pd.DataFrame(labelsDF[mask].drop(['cloud', 'feature', 'light', 'problems', 'time'], axis=1).eruption.apply(get_keys))
    df.file_attributes = labels2
    df = df[mask]
    df.dropna(inplace=True)
    mask2 = df['filename'].isin(os.listdir(orig_path + '/' + folders))
    df = df[mask2]
    df.filename = '/home/mitiga/Images/' + folders +'/' + df.filename
    labels = labels + df['file_attributes'].tolist()
    allImages = allImages + df['filename'].tolist()

image_count = len(allImages)

path_ds = tf.data.Dataset.from_tensor_slices(allImages)
img_ds = path_ds.map(preprocess_image)
label_ds = tf.data.Dataset.from_tensor_slices(labels)
label_ds = label_ds.map(conversion)
print(label_ds.output_shapes)
ds = tf.data.Dataset.zip((img_ds, label_ds))

ds = ds.shuffle(buffer_size=image_count).repeat().batch(5).prefetch(10)

iterator = ds.make_one_shot_iterator()
ds_x, ds_y = iterator.get_next()

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(704, 480, 3)))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3,3), activation='relu'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='softmax'))

print(model.summary())
print(ds_y.shape)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(ds_x, ds_y, epochs=3, steps_per_epoch=1, verbose=3)

