PyTorch BCELoss(): how to make the input size match the target size?



Basic code:


loss_fn = nn.BCELoss() ##nn.CrossEntropyLoss() is ok

def train_loop(dataloader, model, loss_fn, optimizer):
    for batch, (X, y) in enumerate(dataloader):
        X,y=X.float(),y.long()
        predict=model(X)
        loss=loss_fn(predict,y)   #<==ERROR

        optimizer.zero_grad()
        loss.backward()# Calculate Gradients
        optimizer.step()# Update Weights

`ValueError: Using a target size (torch.Size([64])) that is different to the input size (torch.Size([64, 2])) is deprecated. Please ensure they have the same size.`  
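
For reference, the mismatch can be reproduced in isolation. A minimal sketch (random values, only the shapes matter) that raises the same ValueError:

import torch
import torch.nn as nn

loss_fn = nn.BCELoss()
predict = torch.rand(64, 2)      # model output: 2 values per sample
target = torch.rand(64)          # target: 1 value per sample
loss = loss_fn(predict, target)  # ValueError: target size torch.Size([64]) differs from input size torch.Size([64, 2])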

The code is adapted from an example downloaded from the official PyTorch website.
With nn.CrossEntropyLoss() the very same code runs perfectly.
I have already changed (512, 2) to (512, 1), but the error only changed from (torch.Size([64, 2])) to (torch.Size([64, 1])). After I sorted out a few simple issues the code ran, but the loss kept growing; the changed code is at the bottom.
The data has shape [n, 27].
The labels have shape [n, 1].
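
The two losses expect different target conventions, which is why one works and the other does not: nn.CrossEntropyLoss takes raw logits of shape (N, C) plus integer class indices of shape (N,), while nn.BCELoss takes probabilities and a target of exactly the same shape as the input. A quick sketch of both conventions (shapes chosen to match the question):

import torch
import torch.nn as nn

logits = torch.randn(64, 2)                    # (N, C) raw scores
classes = torch.randint(0, 2, (64,))           # (N,) integer class indices
print(nn.CrossEntropyLoss()(logits, classes))  # OK: target holds class indices

probs = torch.rand(64, 1)                      # (N, 1) probabilities in [0, 1]
labels = torch.randint(0, 2, (64, 1)).float()  # (N, 1) float 0/1 labels, same shape
print(nn.BCELoss()(probs, labels))             # OK: input and target shapes match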

Full code:


import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn as nn

class myDataset(Dataset):
    def __init__(self,data,label):
        df = pd.read_csv(data, encoding='gbk')
        df = df.fillna(value=0)
        self.data = np.array(df)

        df = pd.read_csv(label, encoding='gbk')
        df = df.fillna(value=0)
        self.label = np.array(df).reshape(-1)

        #self.transform = transform
        #self.target_transform = target_transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.label[idx]

class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.flatten = nn.Flatten()
        self.network = nn.Sequential(
            #nn.Conv2d(in_channels=1, out_channels=6,kernel_size=5),
            nn.Linear(27, 100),
            nn.ReLU(),
            nn.Linear(100, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 2),
            nn.ReLU()
        )
    def forward(self, x):
        x=self.flatten(x)
        return self.network(x)

def train_loop(dataloader, model, loss_fn, optimizer):
    for batch, (X, y) in enumerate(dataloader):
        #X,y=X.to(device), y.to(device)
        X,y=X.float(),y.long()
        predict=model(X)
        loss=loss_fn(predict,y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()# Calculate Gradients
        optimizer.step()# Update Weights

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{len(dataloader.dataset):>5d}]")

def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.float(), y.long()
            predict = model(X)
            test_loss += loss_fn(predict, y).item()
            correct += (predict.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= size
    correct /= size
    print(f"Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

model=Network()#.to(device)
batch_size = 64
learning_rate = 1e-3
epochs = 5
loss_fn = nn.BCELoss() ##nn.CrossEntropyLoss() is ok
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

trainDataloader = DataLoader(myDataset("mydata/traindata.csv","mydata/trainlabel.csv"),batch_size=batch_size,shuffle=True)
train_loop(trainDataloader, model, loss_fn, optimizer)

Changed code:

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn as nn

class myDataset(Dataset):
    def __init__(self,data,label):#, annotations_file, img_dir, transform=None, target_transform=None):
        df = pd.read_csv(data, encoding='gbk')
        df = df.fillna(value=0)
        self.data = np.array(df)

        df = pd.read_csv(label, encoding='gbk')
        df = df.fillna(value=0)
        self.label = np.array(df).reshape(-1)

        #self.transform = transform
        #self.target_transform = target_transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.label[idx]

class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.flatten = nn.Flatten()
        self.network = nn.Sequential(
            #nn.Conv2d(in_channels=1, out_channels=6,kernel_size=5),
            nn.Linear(27, 100),
            nn.ReLU(),
            nn.Linear(100, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU()
        )
    def forward(self, x):
        x=self.flatten(x)
        return self.network(x)

def train_loop(dataloader, model, loss_fn, optimizer):
    for batch, (X, y) in enumerate(dataloader):
        #X,y=X.to(device), y.to(device)
        X,y=X.float(),y.float()
        predict=model(X)
        loss=loss_fn(predict.reshape(-1),y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()# Calculate Gradients
        optimizer.step()# Update Weights

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{len(dataloader.dataset):>5d}]")

def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.float(), y.float()
            predict = model(X)
            test_loss += loss_fn(predict.reshape(-1), y).item()
            correct += (predict.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= size
    correct /= size
    print(f"Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

model=Network()#.to(device)
batch_size = 64
learning_rate = 1e-3
epochs = 5
loss_fn = nn.BCELoss() ##nn.CrossEntropyLoss() is ok
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

trainDataloader = DataLoader(myDataset("mydata/traindata.csv","mydata/trainlabel.csv"),batch_size=batch_size,shuffle=True)
train_loop(trainDataloader, model, loss_fn, optimizer)

Training output:

loss: 37.500000  [921600/1000000]
loss: 45.312500  [928000/1000000]
loss: 42.187500  [934400/1000000]
loss: 53.125000  [940800/1000000]
loss: 48.437500  [947200/1000000]
loss: 51.562500  [953600/1000000]
loss: 43.750000  [960000/1000000]
loss: 48.437500  [966400/1000000]
loss: 40.625000  [972800/1000000]
loss: 45.312500  [979200/1000000]
loss: 43.750000  [985600/1000000]
loss: 42.187500  [992000/1000000]
loss: 48.437500  [998400/1000000]

1 answer

You mention that your targets/labels are single integer values, but the last layer of your network predicts labels with 2 coordinates. So you are asking BCELoss to compare tensors of different shapes, which the documentation clearly forbids (and the error message says the same thing).

Simply replace the last layer of the network with nn.Linear(512, 1) and the error will disappear.
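
The shape fix alone does not explain the diverging loss in the changed code, though. BCELoss expects probabilities in (0, 1), but the network still ends in nn.ReLU(), which outputs exact zeros for negative pre-activations; BCELoss clamps log(0) to a per-sample loss of 100, which is consistent with the reported losses (all multiples of 100/64). A minimal sketch of a BCE-compatible head, assuming the same 27-feature input as the question:

import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Linear(27, 100), nn.ReLU(),
    nn.Linear(100, 512), nn.ReLU(),
    nn.Linear(512, 512), nn.ReLU(),
    nn.Linear(512, 1),
    nn.Sigmoid(),  # not nn.ReLU(): BCELoss needs outputs in (0, 1)
)
loss_fn = nn.BCELoss()

X = torch.randn(64, 27)                  # dummy batch with the question's shapes
y = torch.randint(0, 2, (64,)).float()   # float 0/1 labels of shape (64,)
loss = loss_fn(model(X).reshape(-1), y)  # flatten (64, 1) to (64,) to match the target
print(loss.item())

A numerically safer alternative is to drop the Sigmoid and use nn.BCEWithLogitsLoss on the raw outputs. Note also that with a single output column, predict.argmax(1) in test_loop is always 0; accuracy should be computed with a threshold instead, e.g. ((predict.reshape(-1) > 0.5) == y).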
