One of the differentiated Tensors appears to not have been used in the graph

Posted 2024-06-16 11:27:16


I want to take the output from an LSTM model and manually update the model's parameters(), but I get the following error:

RuntimeError: One of the differentiated Tensors appears to not have been used in the graph. Set allow_unused=True if this is the desired behavior.
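
For context, PyTorch raises this RuntimeError whenever a tensor passed to torch.autograd.grad has no path in the computation graph leading to the loss. A minimal sketch (the tensors a and b here are illustrative, not from the post):

import torch

a = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)
loss = (a * 2).sum()  # b is never used to compute loss

# raises "One of the differentiated Tensors appears to not have been
# used in the graph" because there is no path from b to loss
grads = torch.autograd.grad(loss, (a, b))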

My RNN model is as follows:

import torch
import torch.nn as nn
import torch.nn.functional as F

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.vars = nn.ParameterList()  # the parameters I want to update manually
        self.lstm = nn.LSTM(input_size=2, hidden_size=128, num_layers=1, batch_first=True)
        self.vars.extend(self.lstm.parameters())
        self.out = nn.Linear(128, 5)
        self.vars.extend(self.out.parameters())

    def forward(self, x, params=None):
        if params is None:
            params = self.vars
        # copy the (possibly updated) weights into the LSTM before running it
        self.lstm.weight_ih_l0.data = params[0]
        self.lstm.weight_hh_l0.data = params[1]
        self.lstm.bias_ih_l0.data = params[2]
        self.lstm.bias_hh_l0.data = params[3]

        output, (hidden, cell) = self.lstm(x, None)

        # copy the output-layer weights before the final projection
        self.out.weight.data = params[4]
        self.out.bias.data = params[5]

        out = self.out(output[:, -1, :])
        return out

    def parameters(self):
        return self.vars
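
An aside on the pattern above: assigning to a parameter's .data replaces its raw values without recording any autograd history, so tensors copied in this way never become part of the graph that produces the loss. A graph-preserving alternative for the output layer (a sketch, not the original code) is to call the functional form directly:

# sketch: use the fast weights directly so autograd can track them;
# F.linear records history through params[4] and params[5]
out = F.linear(output[:, -1, :], params[4], params[5])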


The manual update code is below (model is an instance of the RNN class above):

model = RNN()

y_hat = model(x_spt[i], params=None)
loss = F.cross_entropy(y_hat, y_spt[i])  # y_spt[i] holds the labels
grad = torch.autograd.grad(loss, model.parameters())

# one manual SGD step: fast_weight = weight - base_lr * grad
tuples = zip(grad, model.parameters())
fast_weights = list(map(lambda p: p[1] - base_lr * p[0], tuples))  # update model.parameters()
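
If the goal is a MAML-style inner loop, the second torch.autograd.grad call can only succeed if the fast weights actually appear in the graph of the new loss. That usually requires two changes (a sketch under those assumptions, not the original code): the forward pass must consume params through graph-recording operations rather than .data copies, and the first gradient call should pass create_graph=True so later gradients can flow through the update itself:

# sketch: first inner-loop step with the graph kept alive
grad = torch.autograd.grad(loss, model.parameters(), create_graph=True)
fast_weights = [p - base_lr * g for g, p in zip(grad, model.parameters())]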
        
for k in range(1, update_step):
    y_hat = model(x_spt[i], params=fast_weights)
    loss = F.cross_entropy(y_hat, y_spt[i])

    grad = torch.autograd.grad(loss, fast_weights)  # the error occurs here
    tuples = zip(grad, fast_weights)

    fast_weights = list(map(lambda p: p[1] - base_lr * p[0], tuples))
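
For completeness, here is one way the whole inner loop can be written so that fast_weights stays differentiable, using torch.func.functional_call (available in PyTorch 2.x) on a hypothetical stand-in model with the same shapes as the post; the data tensors x and y below are dummies:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.func import functional_call

class PlainRNN(nn.Module):  # hypothetical stand-in for the RNN above
    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(input_size=2, hidden_size=128, num_layers=1, batch_first=True)
        self.out = nn.Linear(128, 5)

    def forward(self, x):
        output, _ = self.lstm(x)
        return self.out(output[:, -1, :])

model = PlainRNN()
x = torch.randn(4, 10, 2)      # dummy (batch, seq_len, features)
y = torch.randint(0, 5, (4,))  # dummy labels
base_lr, update_step = 0.01, 3

# start from clones of the current parameters; the originals stay untouched
fast = {n: p.clone() for n, p in model.named_parameters()}
for k in range(update_step):
    y_hat = functional_call(model, fast, (x,))   # run with substituted weights
    loss = F.cross_entropy(y_hat, y)
    # create_graph=True keeps each update step itself differentiable
    grads = torch.autograd.grad(loss, list(fast.values()), create_graph=True)
    fast = {n: w - base_lr * g for (n, w), g in zip(fast.items(), grads)}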

Tags: self, none, data, nn, params, vars, out, fast