TensorFlow 2: slice index 64 of dimension 1 out of bounds. [Op:StridedSlice] name: caption_generator_5/strided_slice/

Posted 2024-04-25 08:04:12


Below is my code for generating captions for video sequences:

import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
print(tf.__version__)

class WordEmbeding(tf.keras.layers.Layer):
    def __init__(self, n_words, dim_hidden):
        super(WordEmbeding, self).__init__()
        self.wordEmbed = self.add_variable(name='wordEmbed', shape=(n_words, dim_hidden), trainable=True)

    def build(self, input_shape):
        self.wordEmbed.assign(tf.random.uniform(minval=-0.1, maxval=0.1, seed=10, shape=self.wordEmbed.shape, dtype=tf.float32))

    def call(self, inputs, **kwargs):
        out = tf.nn.embedding_lookup(self.wordEmbed, inputs)
        return out

class CaptionGenerator(tf.keras.Model):
    def __init__(self, n_words, batch_size, dim_feature=512, dim_hidden=512, n_video_lstm=80,
                 n_caption_lstm=20, bias_init_vector=None):
        super(CaptionGenerator, self).__init__()
        self.n_words = n_words
        self.dim_feature = dim_feature
        self.dim_hidden = dim_hidden
        self.n_video_lstm = n_video_lstm
        self.n_caption_lstm = n_caption_lstm
        self.batch_size = batch_size
        self.wordEmbed = WordEmbeding(n_words, dim_hidden)
        self.wordEmbed.build(input_shape=(None,))

        self.dense_feature = keras.layers.Dense(units=dim_hidden, name='dense_feature')
        self.dense_feature.build(input_shape=(None, dim_feature))

        self.lstm1 = keras.layers.LSTMCell(units=dim_hidden, name='lstm_video')
        self.lstm1.build(input_shape=(batch_size, dim_hidden))
        self.lstm2 = keras.layers.LSTMCell(units=dim_hidden, name='lstm_caption')
        self.lstm2.build(input_shape=(batch_size, dim_hidden * 2))

        self.dense_output = keras.layers.Dense(units=n_words, name='dense_output')
        self.dense_output.build(input_shape=(None, dim_hidden))
        if bias_init_vector is not None:
            self.dense_output.bias.assign_add(bias_init_vector)

    def call(self, X, Y=None, Y_mask=None):
        if Y is not None:
            return self.train(X, Y, Y_mask)  # loss
        else:
            return self.predict(X)  # result

    def train(self, X, Y, Y_mask):
        self.state1 = self.lstm1.get_initial_state(batch_size=self.batch_size, dtype=tf.float32)
        self.state2 = self.lstm2.get_initial_state(batch_size=self.batch_size, dtype=tf.float32)
        self.padding = tf.zeros([self.batch_size, self.dim_hidden])
        X = tf.reshape(X, shape=(-1, self.dim_feature))  # (batch_size*T, dim_feature)
        X = self.dense_feature(X)  # (batch_size*T, dim_hidden)
        X = tf.reshape(X, shape=(self.batch_size, -1, self.dim_hidden))
        # encoding video
        losses = 0.0
        for i in range(self.n_video_lstm):
            output1, self.state1 = self.lstm1(X[:, i, :], self.state1)
            output2, self.state2 = self.lstm2(tf.concat([output1, self.padding], 1), self.state2)

        # decoding
        for i in range(self.n_caption_lstm + 1):
            with tf.device('cpu:0'):
                current_embed = self.wordEmbed(Y[:, i])  # tf.gather
            output1, self.state1 = self.lstm1(self.padding, self.state1)
            output2, self.state2 = self.lstm2(tf.concat([output1, current_embed], 1), self.state2)

            labels = Y[:, i + 1]
            onehot_labels = tf.one_hot(labels, depth=self.n_words)

            logit_words = self.dense_output(output2)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=logit_words)
            cross_entropy = cross_entropy * Y_mask[:, i]
            current_loss = tf.reduce_mean(cross_entropy)
            losses += current_loss
        return losses

    def predict(self, X):
        batch_size = X.shape[0]
        self.state1 = self.lstm1.get_initial_state(batch_size=batch_size, dtype=tf.float32)
        self.state2 = self.lstm2.get_initial_state(batch_size=batch_size, dtype=tf.float32)
        self.padding = tf.zeros([X.shape[0], self.dim_hidden])
        X = tf.reshape(X, shape=(-1, self.dim_feature))  # (batch_size*T, dim_feature)
        X = self.dense_feature(X)  # (batch_size*T, dim_hidden)
        X = tf.reshape(X, shape=(batch_size, -1, self.dim_hidden))
        # encoding video
        for i in range(self.n_video_lstm):
            output1, self.state1 = self.lstm1(X[:, i, :], self.state1)
            output2, self.state2 = self.lstm2(tf.concat([output1, self.padding], 1), self.state2)

        # decoding
        generated_words = []
        for i in range(self.n_caption_lstm + 1):
            if i == 0:
                with tf.device('cpu:0'):
                    current_embed = self.wordEmbed(tf.ones([batch_size], dtype=tf.int64))
            output1, self.state1 = self.lstm1(self.padding, self.state1)
            output2, self.state2 = self.lstm2(tf.concat([output1, current_embed], 1), self.state2)

            logit_words = self.dense_output(output2)
            max_prob_index = tf.argmax(logit_words, axis=-1)
            with tf.device('cpu:0'):
                current_embed = self.wordEmbed(max_prob_index)
            generated_words.append(max_prob_index.numpy())

        return np.array(generated_words).T

I get the following error:

slice index 64 of dimension 1 out of bounds. [Op:StridedSlice] name: caption_generator_5/strided_slice/

My input is a video-feature ".npy" file with features of shape (64, 512).
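For reference, the indexing failure can be reproduced in isolation. This is a minimal sketch (not part of the code above), assuming a (batch, 64, 512) feature tensor:

import tensorflow as tf

x = tf.zeros([1, 64, 512])  # (batch, frames, features), matching the .npy input
y = x[:, 64, :]  # InvalidArgumentError: slice index 64 of dimension 1 out of bounds. [Op:StridedSlice]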


1 Answer

Posted 2024-04-25 08:04:12

The error is in this line:

output1, self.state1 = self.lstm1(X[:, i, :], self.state1)

Here "i" is a frame index that runs from 0 to 79 (n_video_lstm defaults to 80), but dimension 1 of X holds only the 64 frames of your input (indices 0 to 63), so the slice X[:, 64, :] steps past the end.
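One way out (a sketch, assuming the features really are (batch, 64, 512) after the reshape) is to drive the encoding loop by the tensor's own shape instead of the fixed self.n_video_lstm, so it can never over-run dimension 1:

        # Sketch of a fix inside train: loop over the frames X actually
        # contains rather than the fixed self.n_video_lstm, so a 64-frame
        # input no longer indexes past dimension 1.
        n_frames = X.shape[1]  # 64 for a (batch, 64, 512) tensor
        for i in range(n_frames):
            output1, self.state1 = self.lstm1(X[:, i, :], self.state1)
            output2, self.state2 = self.lstm2(tf.concat([output1, self.padding], 1), self.state2)

The same change applies to the encoding loop in predict; alternatively, passing n_video_lstm=64 when constructing CaptionGenerator keeps the loops unchanged.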
