Adding neurons to the Adam optimizer state in PyTorch

Posted 2024-04-26 12:31:34

I also posted the following on the PyTorch discussion board. I am trying to keep the per-parameter learning rates of the existing parameters when I add more neurons to the network (to existing layers, not as new layers). I wrote the following class, which lets me add neurons to the hidden layers during training:

import torch
import torch.nn as nn
import numpy as np

class DQN(nn.Module):
    def __init__(self, num_inputs, hidden, num_actions, non_linearity):
        super(DQN, self).__init__()

        self.num_inputs = num_inputs
        self.hidden = hidden
        self.num_actions = num_actions
        self.non_linearity = non_linearity

        self.layers = nn.ModuleList()
        self.layers.append(nn.Linear(num_inputs, self.hidden[0]))

        previous = self.hidden[0]
        for hidden_layer_size in self.hidden[1:]:
            self.layers.append(nn.Linear(previous, hidden_layer_size))
            previous = hidden_layer_size

        self.layers.append(nn.Linear(previous, num_actions))        

    def forward(self, x):
        for i in range(len(self.layers) - 1):
            x = self.non_linearity(self.layers[i](x))
        return self.layers[-1](x)

    def increase_capacity(self, increment):
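        # Grow hidden layer i by increment[i] neurons: the layers are rebuilt
        # at their new sizes and the old weights/biases are copied into the
        # leading rows/columns of the new tensors.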
        for i in range(len(self.hidden)):
            self.hidden[i] += increment[i]

        bias = self.layers[0].bias.data
        weight = self.layers[0].weight.data
        self.layers[0] = nn.Linear(self.num_inputs, self.hidden[0])
        if increment[0] > 0:
            self.layers[0].weight.data[0:-increment[0], :] = weight
            self.layers[0].bias.data[0:-increment[0]] = bias
        else:
            self.layers[0].weight.data[0:, :] = weight
            self.layers[0].bias.data = bias

        for i in range(1, len(self.layers) - 1):
            bias = self.layers[i].bias.data
            weight = self.layers[i].weight.data
            self.layers[i] = nn.Linear(self.hidden[i-1], self.hidden[i])
            if increment[i] > 0:
                if increment[i-1] >0:
                    self.layers[i].bias.data[0:-increment[i]] = bias
                    self.layers[i].weight.data[0:-increment[i],0:-increment[i-1]] = weight
                else:
                    self.layers[i].bias.data[0:-increment[i]] = bias
                    self.layers[i].weight.data[0:-increment[i],0:] = weight
            else:
                if increment[i-1] >0:
                    self.layers[i].bias.data = bias
                    self.layers[i].weight.data[0:,0:-increment[i-1]] = weight
                else:
                    self.layers[i].bias.data = bias
                    self.layers[i].weight.data[0:,0:] = weight

        bias = self.layers[-1].bias.data
        weight = self.layers[-1].weight.data
        self.layers[-1] = nn.Linear(self.hidden[-1], self.num_actions)
        if increment[-1] >0:
            self.layers[-1].bias.data = bias
            self.layers[-1].weight.data[:,0:-increment[-1]] = weight
        else:
            self.layers[-1].bias.data = bias
            self.layers[-1].weight.data[:,0:] = weight

    def act(self, state, epsilon, mask):
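        # assumes `device` is defined elsewhere (e.g. device = torch.device("cpu"))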
        if np.random.rand() > epsilon:
            state = torch.tensor([state], dtype=torch.float32, device=device)
            mask = torch.tensor([mask], dtype=torch.float32, device=device)
            q_values = self.forward(state) + mask
            action = q_values.max(1)[1].view(1, 1).item()
        else:
            action =  np.random.randint(self.num_actions)
        return action
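
For clarity, this is roughly how I use the class (a minimal sketch with illustrative sizes; the hidden sizes and increments here are just an example):

import torch
import torch.nn.functional as F

# two hidden layers of one neuron each, then grow both by 4 neurons
net = DQN(2, [1, 1], 1, F.relu)
old_weight = net.layers[0].weight.data.clone()

net.increase_capacity([4, 4])  # hidden sizes become [5, 5]

# the old weights should now sit in the leading rows of the enlarged layer
print(torch.allclose(net.layers[0].weight.data[:1, :], old_weight))  # True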

Now I have written a small sanity check (whether it is actually sane is debatable at this point): a network with two hidden layers of one neuron each cannot learn the XOR function, whereas it can once 4 neurons have been added to each layer. This does indeed work if I initialise a new optimizer. The optimizer I use is Adam, which keeps track of a learning rate per parameter. I would like to keep the Adam learning rates for the weights and biases that already existed before I added the extra neurons. Below is my failed attempt:

import random
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

# Credits to Alvations
def generate_zero():
    return random.uniform(0, 49) / 100

def generate_one():
    return random.uniform(50, 100) / 100


def generate_xor_XY(num_data_points):
    Xs, Ys = [], []
    for _ in range(num_data_points):
        # xor(0, 0) -> 0 
        Xs.append([generate_zero(), generate_zero()]); Ys.append([0])
        # xor(1, 0) -> 1
        Xs.append([generate_one(), generate_zero()]); Ys.append([1])
        # xor(0, 1) -> 1
        Xs.append([generate_zero(), generate_one()]); Ys.append([1])
        # xor(1, 1) -> 0
        Xs.append([generate_one(), generate_one()]); Ys.append([0])
    return Xs, Ys

# Initialisation
network = DQN(2,[1,1],1,F.relu)
# optimizer = optim.Adam(network.parameters(), amsgrad=False)
optimizer = optim.Adam(network.parameters(), amsgrad=True)
criterion = nn.MSELoss()

# Train 50000 steps to show 1 neuron cannot solve the XOR task
for i in range(50000):
    optimizer.zero_grad()

    Xs, Ys = generate_xor_XY(1)
    Xs = torch.tensor(Xs)
    Ys = torch.tensor(Ys, dtype=torch.float)

    prediction = network(Xs)
    loss = criterion(prediction, Ys)

    loss.backward()
    optimizer.step()

print(network(torch.tensor([[1,0],[0,1],[1,1],[0,0]], dtype=torch.float)))
print(loss)

# Add 4 neurons to each hidden layer
capacity = [4,4]
network.increase_capacity(capacity)

# Uncomment the following line and comment the lines following it for normal initialisation.
# optimizer = optim.Adam(network.parameters(), amsgrad=True)

nw_param = [p for p in network.parameters()]
new_param_group = []

layer_idx = 0
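# Walk over the optimizer's existing parameters and rebuild Adam's per-parameter
# state (step, exp_avg, exp_avg_sq, max_exp_avg_sq) for the enlarged parameters,
# copying the old statistics into the leading slice of the new zero tensors.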
for idx, group in enumerate(optimizer.param_groups):
        for idx_p, p in enumerate(group['params']):
            # Save previous information
            prev_grad = p.grad
            old_p = copy.deepcopy(p)
            old_state = copy.copy(optimizer.state[p])
            old_step = old_state['step']
            old_exp_avg = old_state['exp_avg']
            old_exp_avg_sq = old_state['exp_avg_sq']
            old_max_exp_avg_sq = old_state['max_exp_avg_sq']

            # Remove old parameter from state
            optimizer.state.pop(p)

            # Weights
            if p.dim()>1:
                p = nn.Parameter(nw_param[layer_idx])
                p.grad = torch.zeros_like(p)
                new_exp_avg = torch.zeros_like(p)
                new_exp_avg_sq = torch.zeros_like(p)
                new_max_exp_avg_sq = torch.zeros_like(p)
                p.grad[0:prev_grad.size(0),0:prev_grad.size(1)] = prev_grad                                                           

                optimizer.state[p]['step'] = old_step
                optimizer.state[p]['exp_avg'] = new_exp_avg
                optimizer.state[p]['exp_avg'][0:prev_grad.size(0),0:prev_grad.size(1)] = old_exp_avg
                optimizer.state[p]['exp_avg_sq'] = new_exp_avg_sq
                optimizer.state[p]['exp_avg_sq'][0:prev_grad.size(0),0:prev_grad.size(1)] = old_exp_avg_sq
                optimizer.state[p]['max_exp_avg_sq'] = new_max_exp_avg_sq
                optimizer.state[p]['max_exp_avg_sq'][0:prev_grad.size(0),0:prev_grad.size(1)] = old_max_exp_avg_sq
                new_param_group.append(p)

            # Biases
            else:
                p = nn.Parameter(nw_param[layer_idx])
                p.grad = torch.zeros_like(p)
                new_exp_avg = torch.zeros_like(p)
                new_exp_avg_sq = torch.zeros_like(p)
                new_max_exp_avg_sq = torch.zeros_like(p)
                p.grad[0:prev_grad.size(0)] = prev_grad

                optimizer.state[p]['step'] = old_step
                optimizer.state[p]['exp_avg'] = new_exp_avg
                optimizer.state[p]['exp_avg'][0:prev_grad.size(0)] = old_exp_avg
                optimizer.state[p]['exp_avg_sq'] = new_exp_avg_sq
                optimizer.state[p]['exp_avg_sq'][0:prev_grad.size(0)] = old_exp_avg_sq
                optimizer.state[p]['max_exp_avg_sq'] = new_max_exp_avg_sq
                optimizer.state[p]['max_exp_avg_sq'][0:prev_grad.size(0)] = old_max_exp_avg_sq
                new_param_group.append(p)

            layer_idx += 1

optimizer.param_groups[0]['params'] = new_param_group

print(network)

# Train 50000 steps to show that the task can be solved after adding neurons
for i in range(50000):
    optimizer.zero_grad()

    Xs, Ys = generate_xor_XY(1)
    Xs = torch.tensor(Xs)
    Ys = torch.tensor(Ys, dtype=torch.float)

    prediction = network(Xs)
    loss = criterion(prediction, Ys)

    loss.backward()
    optimizer.step()

print(network(torch.tensor([[1,0],[0,1],[1,1],[0,0]], dtype=torch.float)))
print(loss)
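
For debugging I check, roughly like this, whether the transferred state lines up with the new parameter shapes (just a sketch using the names from the code above):

# every parameter in the (replaced) param group should have Adam buffers
# whose shapes match the enlarged parameter
for p in optimizer.param_groups[0]['params']:
    state = optimizer.state.get(p, {})
    print(p.shape,
          state.get('exp_avg', torch.empty(0)).shape,
          state.get('exp_avg_sq', torch.empty(0)).shape)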

I am trying to end up with the same optimizer state, just extended with extra entries for the added neurons. This seems like a rather convoluted way of doing it (and it doesn't work :p). Does anyone know a (simpler) way to do this, or see where I am going wrong?

