import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from PIL import Image
import matplotlib.pyplot as plt
class Model_Down(nn.Module):
"""
Convolutional (Downsampling) Blocks.
nd = Number of Filters
kd = Kernel size
"""
def __init__(self,in_channels, nd = 128, kd = 3, padding = 1, stride = 2):
super(Model_Down,self).__init__()
self.padder = nn.ReflectionPad2d(padding)
self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = nd, kernel_size = kd, stride = stride)
self.bn1 = nn.BatchNorm2d(nd)
self.conv2 = nn.Conv2d(in_channels = nd, out_channels = nd, kernel_size = kd, stride = 1)
self.bn2 = nn.BatchNorm2d(nd)
self.relu = nn.LeakyReLU()
def forward(self, x):
x = self.padder(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.padder(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
return x
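#Shape summary: with the defaults, Model_Down maps (N, C_in, H, W) to
#(N, nd, H/2, W/2) for even H and W; the stride-2 conv1 does the downsampling
#and conv2 refines the features at the lower resolution.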
class Model_Skip(nn.Module):
"""
Skip Connections
ns = Number of filters
ks = Kernel size
"""
def __init__(self,in_channels = 128, ns = 4, ks = 1, padding = 0, stride = 1):
super(Model_Skip, self).__init__()
self.conv = nn.Conv2d(in_channels = in_channels, out_channels = ns, kernel_size = ks, stride = stride, padding = padding)
self.bn = nn.BatchNorm2d(ns)
self.relu = nn.LeakyReLU()
def forward(self,x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
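#Shape summary: Model_Skip is a 1x1 convolution branch, mapping
#(N, in_channels, H, W) to (N, ns, H, W) without changing the resolution.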
class Model_Up(nn.Module):
"""
Convolutional (Downsampling) Blocks.
nd = Number of Filters
kd = Kernel size
"""
def __init__(self, in_channels = 132, nu = 128, ku = 3, padding = 1):
super(Model_Up, self).__init__()
self.bn1 = nn.BatchNorm2d(in_channels)
self.padder = nn.ReflectionPad2d(padding)
self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = nu, kernel_size = ku, stride = 1, padding = 0)
self.bn2 = nn.BatchNorm2d(nu)
self.conv2 = nn.Conv2d(in_channels = nu, out_channels = nu, kernel_size = 1, stride = 1, padding = 0) #According to supmat.pdf ku = 1 for second layer
self.bn3 = nn.BatchNorm2d(nu)
self.relu = nn.LeakyReLU()
def forward(self,x):
x = self.bn1(x)
x = self.padder(x)
x = self.conv1(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn3(x)
x = self.relu(x)
        x = F.interpolate(x, scale_factor = 2, mode = 'bilinear', align_corners = False) #Double the spatial resolution
return x
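#Shape summary: Model_Up keeps the resolution through both convolutions and
#then doubles it with bilinear interpolation, mapping (N, in_channels, H, W)
#to (N, nu, 2H, 2W).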
class Model(nn.Module):
    def __init__(self, length = 5, in_channels = 32, out_channels = 3,
                 nu = [128, 128, 128, 128, 128], nd = [128, 128, 128, 128, 128],
                 ns = [4, 4, 4, 4, 4], ku = [3, 3, 3, 3, 3], kd = [3, 3, 3, 3, 3],
                 ks = [1, 1, 1, 1, 1]):
super(Model,self).__init__()
assert length == len(nu), 'Hyperparameters do not match network depth.'
self.length = length
        #Down blocks: the first consumes the network input, the rest chain at nd channels
        self.downs = nn.ModuleList([Model_Down(in_channels = in_channels if i == 0 else nd[i-1],
                                               nd = nd[i], kd = kd[i]) for i in range(self.length)])
        #One skip branch taps the output of each down block
        self.skips = nn.ModuleList([Model_Skip(in_channels = nd[i], ns = ns[i], ks = ks[i]) for i in range(self.length)])
        #Up blocks, stored deepest-first; all but the deepest take the previous up output
        #concatenated with a skip activation, hence in_channels = ns[i] + nu[i+1]
        self.ups = nn.ModuleList([Model_Up(in_channels = ns[i] if i == self.length-1 else ns[i]+nu[i+1],
                                           nu = nu[i], ku = ku[i]) for i in range(self.length-1, -1, -1)]) #Elements ordered backwards
self.conv_out = nn.Conv2d(nu[0],out_channels,1,padding = 0)
self.sigm = nn.Sigmoid()
    def forward(self, x):
        s = [] #Skip activations
        #Down pass: run the down blocks in order, tapping a skip activation after each
        for i in range(self.length):
            x = self.downs[i](x)
            s.append(self.skips[i](x))
        #Up pass: the deepest skip feeds the first up block directly; every later up
        #block receives the previous output concatenated with the matching skip
        for i in range(self.length):
            if i == 0:
                x = self.ups[i](s[-1])
            else:
                x = self.ups[i](torch.cat([x, s[self.length-1-i]], dim = 1))
        x = self.sigm(self.conv_out(x)) #Squash to RGB ([0,1]) format
        return x
This code is a modified UNet that I am working with. I am struggling to read and understand it, in particular how the skip connections are wired into the upsampling path. Could anyone explain it, or rewrite it in a simpler, easier-to-follow way that does not need nn.ModuleList? Could someone also show what this network looks like with a diagram?
Here is the link to the GitHub repo that I took this code from and am trying to understand.
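For reference, a quick shape check with the default arguments (a sketch; it assumes an input whose spatial size is divisible by 2**5 = 32, so the five downsampling steps and the skip concatenations line up):

model = Model()                #Default depth-5 configuration
x = torch.randn(1, 32, 64, 64) #(batch, in_channels, H, W)
out = model(x)
print(out.shape)               #torch.Size([1, 3, 64, 64]): same resolution, RGB in [0, 1]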
Here is the functional equivalent of the main model's forward(x) method. It is much more verbose, but it "unrolls" the flow of operations, which makes it easier to follow. I assume the list parameters always have length 5 (I iterate over the range [0, 4], inclusive) so that the loops unroll correctly; this matches the default parameter set.
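A sketch of that unrolled pass, written against the Module version above (the helper name forward_unrolled and the explicit model argument are mine; it assumes the default depth of 5):

def forward_unrolled(model, x):
    #Down pass: each down block halves the spatial size; a skip branch
    #taps the activation after every block
    x = model.downs[0](x)   #in_channels -> 128 channels
    s0 = model.skips[0](x)  #128 -> 4 channels, saved for the up pass
    x = model.downs[1](x)
    s1 = model.skips[1](x)
    x = model.downs[2](x)
    s2 = model.skips[2](x)
    x = model.downs[3](x)
    s3 = model.skips[3](x)
    x = model.downs[4](x)
    s4 = model.skips[4](x)  #deepest skip, consumed immediately below

    #Up pass: model.ups is stored deepest-first (note the reversed range in
    #__init__), so ups[0] expects 4 input channels and the rest expect 132
    x = model.ups[0](s4)                          #4 -> 128 channels, upsample x2
    x = model.ups[1](torch.cat([x, s3], dim = 1)) #128 + 4 = 132 channels in
    x = model.ups[2](torch.cat([x, s2], dim = 1))
    x = model.ups[3](torch.cat([x, s1], dim = 1))
    x = model.ups[4](torch.cat([x, s0], dim = 1))

    return model.sigm(model.conv_out(x)) #Squash to RGB ([0,1]) format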
The two most important parts are:

- the skips process the tensor x in a parallel branch of the code, without disturbing the main x "pathway";
- the tensors produced by the skip branches are then fed back into the "main pathway", starting from the last one.

I keep those tensors as individual variables s0 to s3 so that this is more obvious.

[diagram: the network drawn as a U shape, with gray skip arrows crossing from the down pass to the up pass]

From that picture you can clearly see how the first half feeds the second half: s0 is the longest gray arrow, joining the "main pathway" right before the last group of convolutional layers (so this is a slightly different U-Net). It also shows why we do not need to store s4: it feeds straight into the next layer, so there is no reason to keep it as a separate variable. The Module version does store it, but only because it is convenient to keep everything in one list that is read back in reverse order at the end. The other obvious reason for storing them in a list is that, by changing the parameters accordingly, we can have any number of Up and Down sections.
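For example, a sketch of a shallower, depth-3 variant (my own parameter choice, keeping every list at length 3; inputs then only need spatial sizes divisible by 2**3 = 8):

model3 = Model(length = 3, in_channels = 32, out_channels = 3,
               nu = [128, 128, 128], nd = [128, 128, 128], ns = [4, 4, 4],
               ku = [3, 3, 3], kd = [3, 3, 3], ks = [1, 1, 1])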