RuntimeError: Expected object of type Variable[torch.FloatTensor] but found type Variable[torch.cuda.FloatTensor] for argument #1 'mat2'

Posted 2024-06-16 12:21:16


When I run the following PyTorch code, I get the error message shown further down:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict

# SequenceWise and InferenceBatchLogSoftmax are helper modules defined elsewhere in my project.

class BatchRNN(nn.Module):
    def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM,
                 bidirectional=False, batch_norm=True, dropout=0.1):
        super(BatchRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size)).cuda() if batch_norm else None
        self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
                            bidirectional=bidirectional, dropout=dropout, bias=False,
                            batch_first=True).cuda()

    def forward(self, x):
        if self.batch_norm is not None:
            x = self.batch_norm(x).cuda()
        x, _ = self.rnn(x)
        self.rnn.cuda().flatten_parameters()
        return x

class CTC_RNN(nn.Module):
    def __init__(self, rnn_input_size, rnn_hidden_size, rnn_layers=1,
                 rnn_type=nn.LSTM, bidirectional=True,
                 batch_norm=True, num_class=1232, drop_out=0.1):
        super(CTC_RNN, self).__init__()
        self.rnn_input_size = rnn_input_size
        self.rnn_hidden_size = rnn_hidden_size
        self.rnn_layers = rnn_layers
        self.rnn_type = rnn_type
        self.num_class = num_class
        self.num_directions = 2 if bidirectional else 1

        # CNN branch
        self.conv1_cnn = nn.Conv2d(1, 256, (2, rnn_input_size)).cuda()
        self.conv2_cnn = nn.Conv2d(1, 256, (2, 256)).cuda()
        self.fc_cnn = nn.Linear(256, rnn_hidden_size).cuda()
        self.softmax_cnn = torch.nn.Softmax().cuda()

        # stack of BatchRNN layers
        rnns = []
        rnn = BatchRNN(input_size=rnn_input_size, hidden_size=rnn_hidden_size,
                       rnn_type=rnn_type, bidirectional=bidirectional,
                       batch_norm=False).cuda()
        rnns.append(('0', rnn))
        for i in range(rnn_layers - 1):
            rnn = BatchRNN(input_size=self.num_directions * rnn_hidden_size,
                           hidden_size=rnn_hidden_size, rnn_type=rnn_type,
                           bidirectional=bidirectional, dropout=drop_out,
                           batch_norm=batch_norm).cuda()
            rnns.append(('%d' % (i + 1), rnn))

        self.rnns = nn.Sequential(OrderedDict(rnns)).cuda()

        if batch_norm:
            fc = nn.Sequential(nn.BatchNorm1d(self.num_directions * rnn_hidden_size).cuda(),
                               nn.Linear(self.num_directions * rnn_hidden_size, rnn_hidden_size, bias=False).cuda()).cuda()
        else:
            fc = nn.Linear(self.num_directions * rnn_hidden_size, rnn_hidden_size, bias=False).cuda()
        self.fc = SequenceWise(fc).cuda()
        self.inference_log_softmax = InferenceBatchLogSoftmax().cuda()
        self.softmax = torch.nn.Softmax().cuda()
        #self.inference_softmax = InferenceBatchSoftmax()

        # TDDF fusion LSTM
        self.tddf_lstm = nn.LSTMCell(rnn_hidden_size, rnn_hidden_size).cuda()
        self.fc_s = nn.Linear(rnn_hidden_size, 2, bias=True).cuda()
        self.fc_c = nn.Linear(rnn_hidden_size, 2, bias=True).cuda()
        self.hx = Variable(torch.zeros(100, rnn_hidden_size), requires_grad=True).cuda()
        self.cx = Variable(torch.zeros(100, rnn_hidden_size), requires_grad=True).cuda()
        self.fc_tddf = nn.Linear(rnn_hidden_size, num_class).cuda()

    def forward(self, x, y):
        # x: packed padded sequence
        #    [x.data: the original data]
        #    [x.batch_sizes: the batch size of each frame]
        #    [x_len: a plain list, not a torch.IntTensor]
        #ipdb.set_trace()
        x = self.rnns(x)
        x = self.fc(x)
        x = self.inference_log_softmax(x)  # (max_step, batch_size, dim)
        x = x.transpose(0, 1)
        #x = self.inference_softmax(x)

        y = self.conv1_cnn(y)
        # version 2: ReLU
        y = F.relu(y)
        y = self.conv2_cnn(torch.transpose(y, 1, 3))
        y = F.relu(y)
        y = self.fc_cnn(torch.transpose(y, 1, 3))  # (batch_size, 1, max_step, dim)
        #y = torch.transpose(y, 1, 3)
        y = y.view(100, -1, self.rnn_hidden_size)
        y = torch.transpose(y, 0, 1)

        output = Variable(torch.zeros(x.cpu().data.numpy().shape[0], 100, self.rnn_hidden_size)).cuda()
        for i in range(x.cpu().data.numpy().shape[0]):
            #ipdb.set_trace()
            if i == 0:
                st = F.softmax(self.fc_s(self.hx))
                ct = F.sigmoid(self.fc_c(self.hx))
                at = st * ct
                tddf_input_i_x = x[i] * at[:, 0].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i_y = y[i] * at[:, 1].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i = tddf_input_i_x + tddf_input_i_y
                hx, cx = self.tddf_lstm(tddf_input_i, (self.hx, self.cx))
                output[i] = hx
            else:
                st = F.softmax(self.fc_s(hx))
                ct = F.sigmoid(self.fc_c(hx))
                at = st * ct
                tddf_input_i_x = x[i] * at[:, 0].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i_y = y[i] * at[:, 1].contiguous().view(100, 1).expand(100, self.rnn_hidden_size)
                tddf_input_i = tddf_input_i_x + tddf_input_i_y
                #tddf_input_i = x[i]*at[:,0].contiguous().view(100,1).expand(100,self.rnn_hidden_size) + y[i]*at[:1].contiguous().view(100,1).expand(100,self.rnn_hidden_size)
                hx, cx = self.tddf_lstm(tddf_input_i, (hx, cx))
                output[i] = hx
        return self.fc_tddf(output)

The error shown is:

RuntimeError: Expected object of type Variable[torch.FloatTensor] but found type Variable[torch.cuda.FloatTensor] for argument #1 'mat2'
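
From what I understand, this kind of message means that one operand of a matrix multiply is a CPU tensor while the other is a CUDA tensor. A minimal sketch (not my actual code, names and sizes are made up) that triggers the same kind of mismatch:

import torch
from torch.autograd import Variable

# one operand left on the CPU, the other moved to the GPU
a = Variable(torch.randn(4, 3))          # torch.FloatTensor (CPU)
b = Variable(torch.randn(3, 5)).cuda()   # torch.cuda.FloatTensor (GPU)

# torch.mm needs both operands on the same device; mixing them raises a
# type-mismatch RuntimeError like the one above, complaining about 'mat2'.
c = torch.mm(a, b)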

I would be grateful if anyone could help! Notably, this code seemed to work on PyTorch 0.2.0 (I am now using 0.3.0).
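
In case it helps, this is the kind of device check I have been using to narrow it down (a hypothetical helper, not part of the model above; report_devices is my own name):

# Diagnostic sketch: print which device every tensor feeding the failing
# matmul lives on, right before the call that crashes.
def report_devices(model, x, y):
    # if x is a PackedSequence, inspect x.data instead of x itself
    data = x.data if hasattr(x, 'batch_sizes') else x
    print('input x on cuda :', data.is_cuda)
    print('input y on cuda :', y.is_cuda)
    print('hx on cuda      :', model.hx.is_cuda)
    print('cx on cuda      :', model.cx.is_cuda)
    print('params on cuda  :', all(p.is_cuda for p in model.parameters()))

# e.g. report_devices(ctc_rnn, x, y) just before calling ctc_rnn(x, y)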

