How do I read this modified UNet?


import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from PIL import Image
import matplotlib.pyplot as plt

class Model_Down(nn.Module):
    """
    Convolutional (Downsampling) Blocks.

    nd = Number of Filters
    kd = Kernel size

    """
    def __init__(self,in_channels, nd = 128, kd = 3, padding = 1, stride = 2):
        super(Model_Down,self).__init__()
        self.padder = nn.ReflectionPad2d(padding)
        self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = nd, kernel_size = kd, stride = stride)
        self.bn1 = nn.BatchNorm2d(nd)

        self.conv2 = nn.Conv2d(in_channels = nd, out_channels = nd, kernel_size = kd, stride = 1)
        self.bn2 = nn.BatchNorm2d(nd)

        self.relu = nn.LeakyReLU()

    def forward(self, x):
        x = self.padder(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.padder(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        return x

class Model_Skip(nn.Module):
    """

    Skip Connections

    ns = Number of filters
    ks = Kernel size

    """
    def __init__(self,in_channels = 128, ns = 4, ks = 1, padding = 0, stride = 1):
        super(Model_Skip, self).__init__()
        self.conv = nn.Conv2d(in_channels = in_channels, out_channels = ns, kernel_size = ks, stride = stride, padding = padding)
        self.bn = nn.BatchNorm2d(ns)
        self.relu = nn.LeakyReLU()

    def forward(self,x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

class Model_Up(nn.Module):
    """
    Convolutional (Upsampling) Blocks.

    nd = Number of Filters
    kd = Kernel size

    """
    def __init__(self, in_channels = 132, nu = 128, ku = 3, padding = 1):
        super(Model_Up, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.padder = nn.ReflectionPad2d(padding)
        self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = nu, kernel_size = ku, stride = 1, padding = 0)
        self.bn2 = nn.BatchNorm2d(nu)

        self.conv2 =  nn.Conv2d(in_channels = nu, out_channels = nu, kernel_size = 1, stride = 1, padding = 0) #According to supmat.pdf ku = 1 for second layer
        self.bn3 = nn.BatchNorm2d(nu)

        self.relu = nn.LeakyReLU()

    def forward(self,x):
        x = self.bn1(x)
        x = self.padder(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')
        return x

class Model(nn.Module):
    def __init__(self, length = 5, in_channels = 32, out_channels = 3, nu = [128,128,128,128,128] , nd =
                    [128,128,128,128,128], ns = [4,4,4,4,4], ku = [3,3,3,3,3], kd = [3,3,3,3,3], ks = [1,1,1,1,1]):
        super(Model,self).__init__()
        assert length == len(nu), 'Hyperparameters do not match network depth.'

        self.length = length

        self.downs = nn.ModuleList([Model_Down(in_channels = nd[i-1], nd = nd[i], kd = kd[i]) if i != 0 else
                                        Model_Down(in_channels = in_channels, nd = nd[i], kd = kd[i]) for i in range(self.length)])

        self.skips = nn.ModuleList([Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i]) for i in range(self.length)])

        self.ups = nn.ModuleList([Model_Up(in_channels = ns[i]+nu[i+1], nu = nu[i], ku = ku[i]) if i != self.length-1 else
                                        Model_Up(in_channels = ns[i], nu = nu[i], ku = ku[i]) for i in range(self.length-1,-1,-1)]) #Elements ordered backwards

        self.conv_out = nn.Conv2d(nu[0],out_channels,1,padding = 0)
        self.sigm = nn.Sigmoid()

    def forward(self,x):
        s = [] #Skip Activations

        #Downpass
        for i in range(self.length):
            x = self.downs[i].forward(x)
            s.append(self.skips[i].forward(x))

        #Uppass
        for i in range(self.length):
            if (i == 0):
                x = self.ups[i].forward(s[-1])
            else:
                x = self.ups[i].forward(torch.cat([x,s[self.length-1-i]],axis = 1))

        x = self.sigm(self.conv_out(x)) #Squash to RGB ([0,1]) format
        return x
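
The code runs; as a quick sanity check (net and x below are just illustrative names), it round-trips spatial shapes whenever the input side length is divisible by 32, since the five stride-2 blocks each halve the resolution and the five bilinear upsamplings double it back:

net = Model()
x = torch.randn(1, 32, 256, 256)  # batch of 1, 32 noise channels, 256x256
print(net(x).shape)               # torch.Size([1, 3, 256, 256])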

This code is a modified UNet that I am working on. I am struggling to read and understand it, in particular how the skip connections are wired into the upsampling path. Could anyone explain it, or rewrite it in a simpler, easier-to-read way without nn.ModuleList?

Could anyone show with a diagram what this network looks like?

Here is the github repo link from which I took this code and which I am trying to understand.


1 Answer

Here is the functional equivalent of the main model's forward(x) method. It is much more verbose, but it "unrolls" the flow of operations, which makes it easier to follow.

I assume the list arguments always have length 5 (i ranges over [0, 4], inclusive), so that I can unroll them correctly (this follows the default argument set).

def unet_function(x, in_channels = 32, out_channels = 3, nu = [128,128,128,128,128],
                  nd = [128,128,128,128,128], ns = [4,4,4,4,4], ku = [3,3,3,3,3],
                  kd = [3,3,3,3,3], ks = [1,1,1,1,1]):


    ################################
    # DOWN PASS ####################
    ################################

    #########
    # i = 0 #
    #########

    # First Down
    # Model_Down(in_channels = in_channels, nd = nd[i], kd = kd[i])
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=in_channels, out_channels=nd[0], kernel_size=kd[0], stride=2)(x)
    x = nn.BatchNorm2d(nd[0])(x)
    x = nn.LeakyReLU()(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[0], out_channels=nd[0], kernel_size=kd[0], stride=1)(x)
    x = nn.BatchNorm2d(nd[0])(x)
    x = nn.LeakyReLU()(x)

    # First skip
    # Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i])
    s0 = nn.Conv2d(in_channels=nd[0], out_channels=ns[0], kernel_size=ks[0], stride=1, padding=0)(x)
    s0 = nn.BatchNorm2d(ns[0])(s0)
    s0 = nn.LeakyReLU()(s0)


    #########
    # i = 1 #
    #########

    # Second Down
    # Model_Down(in_channels = nd[i-1], nd = nd[i], kd = kd[i])
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[0], out_channels=nd[1], kernel_size=kd[1], stride=2)(x)
    x = nn.BatchNorm2d(nd[1])(x)
    x = nn.LeakyReLU()(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[1], out_channels=nd[1], kernel_size=kd[1], stride=1)(x)
    x = nn.BatchNorm2d(nd[1])(x)
    x = nn.LeakyReLU()(x)

    # Second skip
    # Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i])
    s1 = nn.Conv2d(in_channels=nd[1], out_channels=ns[1], kernel_size=ks[1], stride=1, padding=0)(x)
    s1 = nn.BatchNorm2d(ns[1])(s1)
    s1 = nn.LeakyReLU()(s1)


    #########
    # i = 2 #
    #########

    # Third Down
    # Model_Down(in_channels = nd[i-1], nd = nd[i], kd = kd[i])
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[1], out_channels=nd[2], kernel_size=kd[2], stride=2)(x)
    x = nn.BatchNorm2d(nd[2])(x)
    x = nn.LeakyReLU()(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[2], out_channels=nd[2], kernel_size=kd[2], stride=1)(x)
    x = nn.BatchNorm2d(nd[2])(x)
    x = nn.LeakyReLU()(x)

    # Third skip
    # Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i])
    s2 = nn.Conv2d(in_channels=nd[2], out_channels=ns[2], kernel_size=ks[2], stride=1, padding=0)(x)
    s2 = nn.BatchNorm2d(ns[2])(s2)
    s2 = nn.LeakyReLU()(s2)


    #########
    # i = 3 #
    #########

    # Fourth Down
    # Model_Down(in_channels = nd[i-1], nd = nd[i], kd = kd[i])
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[2], out_channels=nd[3], kernel_size=kd[3], stride=2)(x)
    x = nn.BatchNorm2d(nd[3])(x)
    x = nn.LeakyReLU()(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[3], out_channels=nd[3], kernel_size=kd[3], stride=1)(x)
    x = nn.BatchNorm2d(nd[3])(x)
    x = nn.LeakyReLU()(x)

    # Fourth skip
    # Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i])
    s3 = nn.Conv2d(in_channels=nd[3], out_channels=ns[3], kernel_size=ks[3], stride=1, padding=0)(x)
    s3 = nn.BatchNorm2d(ns[3])(s3)
    s3 = nn.LeakyReLU()(s3)


    #########
    # i = 4 #
    #########

    # Fifth Down
    # Model_Down(in_channels = nd[i-1], nd = nd[i], kd = kd[i])
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[3], out_channels=nd[4], kernel_size=kd[4], stride=2)(x)
    x = nn.BatchNorm2d(nd[4])(x)
    x = nn.LeakyReLU()(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=nd[4], out_channels=nd[4], kernel_size=kd[4], stride=1)(x)
    x = nn.BatchNorm2d(nd[4])(x)
    x = nn.LeakyReLU()(x)

    # Fifth skip
    # Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i])
    x = nn.Conv2d(in_channels=nd[4], out_channels=ns[4], kernel_size=ks[4], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(ns[4])(x)
    x = nn.LeakyReLU()(x)



    ################################
    # UP PASS ######################
    ################################

    #########
    # i = 4 #
    #########

    # First Up
    # Model_Up(in_channels = ns[i], nu = nu[i], ku = ku[i])
    x = nn.BatchNorm2d(ns[4])(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=ns[4], out_channels=nu[4], kernel_size=ku[4], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(nu[4])(x)
    x = nn.LeakyReLU()(x)
    x = nn.Conv2d(in_channels = nu[4], out_channels=nu[4], kernel_size = 1, stride = 1, padding = 0)(x)
    x = nn.BatchNorm2d(nu[4])(x)
    x = nn.LeakyReLU()(x)
    x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')


    #########
    # i = 3 #
    #########

    # Second Up
    # self.ups[i].forward(torch.cat([x,s[self.length-1-i]],axis = 1))
    x = torch.cat([x,s3], axis=1) # IMPORTANT HERE
    # Model_Up(in_channels = ns[i]+nu[i+1], nu = nu[i], ku = ku[i])
    x = nn.BatchNorm2d(ns[3]+nu[4])(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=ns[3]+nu[4], out_channels=nu[3], kernel_size=ku[3], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(nu[3])(x)
    x = nn.LeakyReLU()(x)
    x = nn.Conv2d(in_channels = nu[3], out_channels=nu[3], kernel_size = 1, stride = 1, padding = 0)(x)
    x = nn.BatchNorm2d(nu[3])(x)
    x = nn.LeakyReLU()(x)
    x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')


    #########
    # i = 2 #
    #########

    # Third Up
    # self.ups[i].forward(torch.cat([x,s[self.length-1-i]],axis = 1))
    x = torch.cat([x,s2], axis=1) # IMPORTANT HERE
    # Model_Up(in_channels = ns[i]+nu[i+1], nu = nu[i], ku = ku[i])
    x = nn.BatchNorm2d(ns[2]+nu[3])(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=ns[2]+nu[3], out_channels=nu[2], kernel_size=ku[2], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(nu[2])(x)
    x = nn.LeakyReLU()(x)
    x = nn.Conv2d(in_channels = nu[2], out_channels=nu[2], kernel_size = 1, stride = 1, padding = 0)(x)
    x = nn.BatchNorm2d(nu[2])(x)
    x = nn.LeakyReLU()(x)
    x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')


    #########
    # i = 1 #
    #########

    # Fourth Up
    # self.ups[i].forward(torch.cat([x,s[self.length-1-i]],axis = 1))
    x = torch.cat([x,s1], axis=1) # IMPORTANT HERE
    # Model_Up(in_channels = ns[i]+nu[i+1], nu = nu[i], ku = ku[i])
    x = nn.BatchNorm2d(ns[1]+nu[2])(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=ns[1]+nu[2], out_channels=nu[1], kernel_size=ku[1], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(nu[1])(x)
    x = nn.LeakyReLU()(x)
    x = nn.Conv2d(in_channels = nu[1], out_channels=nu[1], kernel_size = 1, stride = 1, padding = 0)(x)
    x = nn.BatchNorm2d(nu[1])(x)
    x = nn.LeakyReLU()(x)
    x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')    


    #########
    # i = 0 #
    #########

    # Fifth Up
    # self.ups[i].forward(torch.cat([x,s[self.length-1-i]],axis = 1))
    x = torch.cat([x,s0], axis=1) # IMPORTANT HERE
    # Model_Up(in_channels = ns[i]+nu[i+1], nu = nu[i], ku = ku[i])
    x = nn.BatchNorm2d(ns[0]+nu[1])(x)
    x = nn.ReflectionPad2d(padding=1)(x)
    x = nn.Conv2d(in_channels=ns[0]+nu[1], out_channels=nu[0], kernel_size=ku[0], stride=1, padding=0)(x)
    x = nn.BatchNorm2d(nu[0])(x)
    x = nn.LeakyReLU()(x)
    x = nn.Conv2d(in_channels = nu[0], out_channels=nu[0], kernel_size = 1, stride = 1, padding = 0)(x)
    x = nn.BatchNorm2d(nu[0])(x)
    x = nn.LeakyReLU()(x)
    x = F.interpolate(x, scale_factor = 2, mode = 'bilinear')


    ################################
    # OUT ##########################
    ################################

    x = nn.Conv2d(in_channels=nu[0], out_channels=out_channels, kernel_size=1, padding = 0)(x)
    return nn.Sigmoid()(x) #Squash to RGB ([0,1]) format

The two most important parts are:

  1. The skip tensors are computed in a parallel branch of the code, without disturbing the main x "pathway".

  2. The tensors produced by the skip branches are then fed back into the "main pathway", starting from the last one. I kept these tensors as the individual variables s0 to s3 so that this is more obvious (see the shape demo right after this list).
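
To make point 2 concrete, here is a minimal shape demo of one such concatenation (the shapes are illustrative, matching the i = 3 stage under the default hyperparameters):

import torch

x = torch.randn(1, 128, 16, 16)       # output of the previous Model_Up (nu[4] = 128 channels)
s3 = torch.randn(1, 4, 16, 16)        # stored skip activation (ns[3] = 4 channels)
merged = torch.cat([x, s3], dim = 1)  # stack along the channel axis
print(merged.shape)                   # torch.Size([1, 132, 16, 16]) -- the in_channels = 132 that Model_Up expects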

From the picture below you can clearly see the down half feeding the up half: s0 is the longest grey arrow, joining the "main pathway" right before the last group of convolutional layers.

[Figure: UNet (a different U-shaped network)]

You can also see from this why we do not need to store s4: it feeds directly into the next layer, so there is no need to keep it in a separate variable.

The Module version does store it, but only because it is convenient to keep everything in one list that is read back in reverse order at the end. The other obvious reason for storing the blocks in lists is that, by changing the parameters accordingly, we can have any number of Up and Down stages.
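
If you want the same network without nn.ModuleList at all, here is a minimal sketch for the default depth of 5, reusing the Model_Down / Model_Skip / Model_Up classes from the question (ExplicitUNet is just an illustrative name):

import torch
import torch.nn as nn

class ExplicitUNet(nn.Module):
    """Same architecture as Model with its default hyperparameters,
    with every submodule spelled out instead of held in a ModuleList."""
    def __init__(self, in_channels = 32, out_channels = 3):
        super(ExplicitUNet, self).__init__()
        # Down pass: each block halves the spatial resolution (stride 2).
        self.down0 = Model_Down(in_channels = in_channels, nd = 128, kd = 3)
        self.down1 = Model_Down(in_channels = 128, nd = 128, kd = 3)
        self.down2 = Model_Down(in_channels = 128, nd = 128, kd = 3)
        self.down3 = Model_Down(in_channels = 128, nd = 128, kd = 3)
        self.down4 = Model_Down(in_channels = 128, nd = 128, kd = 3)
        # Skip branches: 1x1 convs squeezing 128 channels down to 4.
        self.skip0 = Model_Skip(in_channels = 128, ns = 4, ks = 1)
        self.skip1 = Model_Skip(in_channels = 128, ns = 4, ks = 1)
        self.skip2 = Model_Skip(in_channels = 128, ns = 4, ks = 1)
        self.skip3 = Model_Skip(in_channels = 128, ns = 4, ks = 1)
        self.skip4 = Model_Skip(in_channels = 128, ns = 4, ks = 1)
        # Up pass: the deepest block sees only its skip (4 channels);
        # every other block sees skip (4) + previous up output (128) = 132.
        self.up4 = Model_Up(in_channels = 4, nu = 128, ku = 3)
        self.up3 = Model_Up(in_channels = 132, nu = 128, ku = 3)
        self.up2 = Model_Up(in_channels = 132, nu = 128, ku = 3)
        self.up1 = Model_Up(in_channels = 132, nu = 128, ku = 3)
        self.up0 = Model_Up(in_channels = 132, nu = 128, ku = 3)
        self.conv_out = nn.Conv2d(128, out_channels, 1, padding = 0)
        self.sigm = nn.Sigmoid()

    def forward(self, x):
        x = self.down0(x); s0 = self.skip0(x)  # skip branches peel off the main pathway
        x = self.down1(x); s1 = self.skip1(x)
        x = self.down2(x); s2 = self.skip2(x)
        x = self.down3(x); s3 = self.skip3(x)
        x = self.down4(x); s4 = self.skip4(x)  # s4 is consumed immediately below
        x = self.up4(s4)                           # deepest stage: skip only
        x = self.up3(torch.cat([x, s3], dim = 1))  # 128 + 4 = 132 channels
        x = self.up2(torch.cat([x, s2], dim = 1))
        x = self.up1(torch.cat([x, s1], dim = 1))
        x = self.up0(torch.cat([x, s0], dim = 1))
        return self.sigm(self.conv_out(x))  # squash to RGB ([0,1])

This builds exactly the same layer structure as Model() with its default arguments; the only thing you lose is the ability to change the depth through the length parameter.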
