训练神经网络时列出超出范围的索引

2024-04-19 08:52:53 发布

您现在位置:Python中文网/ 问答频道 /正文

我正在尝试使用ResNet实现卫星图像的超分辨率。我已经加载了所有的图像。我有2240个图像用于输入和输出,560个用于验证。我总是出现“列表索引超出范围”错误。是因为图像的大小吗?如果是这样,是否有任何方法可以更改代码中图像的大小

代码如下:

from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Convolution2D
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import UpSampling2D
import tensorflow as tf
from keras.models import Sequential
from keras.models import Model

train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_indir = r"D:\\For_Training1\\train"
validation_indir = r"D:\\For_Training2\\val"
train_generator = train_datagen.flow_from_directory(
        train_indir,
        target_size = (3402,3401),
        class_mode='input')
valid_generator = validation_datagen.flow_from_directory(
            validation_indir,
            target_size = (3402,3401),
            class_mode = 'input')

train_outdir = r"D:\\For_Training3\\Train"
train_generator = train_datagen.flow_from_directory(
        train_outdir,
        target_size = (3402,3401),
        class_mode='input')

base_model = tf.keras.applications.ResNet50(
    include_top=False,
    weights="imagenet",
    input_shape=(3402,3401,3),
    pooling=None,  
)
for layer in base_model.layers[:46]:
    layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Convolution2D(3,9,activation='relu',padding='same'))
model.add(UpSampling2D())
model.add(UpSampling2D())
model.add(Convolution2D(3,9,activation='relu',padding='same'))




model.compile(optimizer="adam", loss='mean_squared_error', metrics=['mean_squared_error'])
model.fit(train_indir,train_outdir,validation_data = validation_indir,batch_size=32, epochs=100, verbose=0)

错误:

Found 2240 images belonging to 1 classes.
Found 560 images belonging to 1 classes.
Found 2240 images belonging to 1 classes.
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-5-7c9709ce4116> in <module>
     45 
     46 model.compile(optimizer="adam", loss='mean_squared_error', metrics=['mean_squared_error'])
---> 47 model.fit(train_indir,train_outdir,validation_data = validation_indir,batch_size=32, epochs=100, verbose=0)
     48 
     49 

~\anaconda3\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1106          training_utils.RespectCompiledTrainableState(self):
   1107       # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
-> 1108       data_handler = data_adapter.get_data_handler(
   1109           x=x,
   1110           y=y,

~\anaconda3\lib\site-packages\keras\engine\data_adapter.py in get_data_handler(*args, **kwargs)
   1346   if getattr(kwargs["model"], "_cluster_coordinator", None):
   1347     return _ClusterCoordinatorDataHandler(*args, **kwargs)
-> 1348   return DataHandler(*args, **kwargs)
   1349 
   1350 

~\anaconda3\lib\site-packages\keras\engine\data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution, distribute)
   1136     adapter_cls = select_data_adapter(x, y)
   1137     self._verify_data_adapter_compatibility(adapter_cls)
-> 1138     self._adapter = adapter_cls(
   1139         x,
   1140         y,

~\anaconda3\lib\site-packages\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, sample_weight_modes, batch_size, shuffle, **kwargs)
    655         sample_weights, sample_weight_modes)
    656 
--> 657     self._internal_adapter = TensorLikeDataAdapter(
    658         x,
    659         y=y,

~\anaconda3\lib\site-packages\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, sample_weight_modes, batch_size, epochs, steps, shuffle, **kwargs)
    239     inputs = pack_x_y_sample_weight(x, y, sample_weights)
    240 
--> 241     num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(inputs)).pop()
    242     _check_data_cardinality(inputs)
    243 

~\anaconda3\lib\site-packages\keras\engine\data_adapter.py in <genexpr>(.0)
    239     inputs = pack_x_y_sample_weight(x, y, sample_weights)
    240 
--> 241     num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(inputs)).pop()
    242     _check_data_cardinality(inputs)
    243 

~\anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py in __getitem__(self, key)
    894       else:
    895         if self._v2_behavior:
--> 896           return self._dims[key].value
    897         else:
    898           return self._dims[key]

IndexError: list index out of range

Tags: sampleinfromimportselfdatasizeadapter