以下代码用于在keras中使用tensorflow后端定义CNN架构:
class DownBlock(object):
def __init__(self, prev_layer, num_chann = 16, depthwise_initializer = 'glorot_uniform', kernel_initializer = 'glorot_uniform', bias_initializer = 'zeros', drop_rate = None, spdrop_rate = None, activation = 'relu', pool = True):
self.prev_layer = prev_layer
if pool == True:
self.prev_layer = MaxPooling2D((2, 2)) (self.prev_layer)
self.prev_layer = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.prev_layer)
self.convo = Activation(activation) (self.prev_layer)
self.convo = BatchNormalization() (self.convo)
if not spdrop_rate == None:
self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
if not drop_rate == None:
self.convo = Dropout(drop_rate) (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = Activation(activation) (self.convo)
self.convo = BatchNormalization() (self.convo)
if not spdrop_rate == None:
self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
if not drop_rate == None:
self.convo = Dropout(drop_rate) (self.convo)
self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = Add([self.prev_layer, self.convo])
def get(self):
return self.convo
class UpBlock(object):
def __init__(self, prev_layer, bridge_layer, num_chann = 16, depthwise_initializer = 'glorot_uniform', kernel_initializer = 'glorot_uniform', bias_initializer = 'zeros', drop_rate = None, spdrop_rate = None, activation = 'relu', up = True):
self.prev_layer = prev_layer
self.bridge_layer = bridge_layer
self.convo = Activation(activation) (self.prev_layer)
self.convo = BatchNormalization() (self.convo)
if not spdrop_rate == None:
self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
if not drop_rate == None:
self.convo = Dropout(drop_rate) (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = Activation(activation) (self.convo)
self.convo = BatchNormalization() (self.convo)
if not spdrop_rate == None:
self.convo = SpatialDropout2D(spdrop_rate) (self.convo)
if not drop_rate == None:
self.convo = Dropout(drop_rate) (self.convo)
self.convo = DepthwiseConv2D((3, 3), depthwise_initializer = depthwise_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
self.convo = Conv2D(num_chann, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = Add([self.prev_layer, self.convo])
if up == True:
self.convo = Conv2D(num_chann/2, (1, 1), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer) (self.convo)
self.convo = Conv2DTranspose(num_chann/2, (2, 2), strides = (2, 2), kernel_initializer = kernel_initializer, bias_initializer = bias_initializer, padding = 'same') (self.convo)
self.convo = Add([self.bridge_layer, self.convo])
def get(self):
return self.convo
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
s = Conv2D(8, (1, 1)) (s)
d1 = DownBlock(s, num_chann = 16, drop_rate = 0.1)
d2 = DownBlock(d1.get(), num_chann = 32, drop_rate = 0.1)
d3 = DownBlock(d2.get(), num_chann = 64, drop_rate = 0.1)
d4 = DownBlock(d3.get(), num_chann = 128, drop_rate = 0.1)
d5 = DownBlock(d4.get(), num_chann = 256, drop_rate = 0.1)
m = DownBlock(d5.get(), num_chann = 512, drop_rate = 0.1)
u5 = UpBlock(m.get(), d4.get(), num_chann = 256, drop_rate = 0.1)
u4 = UpBlock(u5.get(), d3.get(), num_chann = 128, drop_rate = 0.1)
u3 = UpBlock(u4.get(), d2.get(), num_chann = 64, drop_rate = 0.1)
u2 = UpBlock(u3.get(), d1.get(), num_chann = 32, drop_rate = 0.1)
u1 = UpBlock(u2.get(), s, num_chann = 16, drop_rate = 0.1)
final = Conv2D(1, (1, 1)) (u1.get())
# final = SpatialDropout2D(0.1) (final)
final = Dropout(0.1) (final)
final = BatchNormalization() (final)
outputs = Activation("sigmoid") (final)
model = Model(inputs = [inputs], outputs = [outputs])
在Jupyter笔记本内执行时,将生成以下stacktrace:
^{pr2}$跟踪的最后一行。。。在
TypeError: __init__() takes 1 positional argument but 2 were given
。。。讨论了将两个位置参数传递给第一个UpBlock()调用,而我显然传递了一个-
d1 = DownBlock(s, num_chann = 16, drop_rate = 0.1)
另一个位置参数在哪里?如果没有,为什么我会得到这个错误?在
尽管错误源于对
DownBlock
构造函数的调用,但Python也注意到错误回溯是(most recent call last)
。此错误是指将两个参数传递给Add
构造函数。Python告诉您的是对Add()
的调用有太多的参数。在这里的诀窍是,虽然看起来您只向
Add()
提供一个列表参数,但Python类构造函数都会接收一个隐式self
参数作为它们的第一个位置参数。见the Python docs。在来自评论:
在Keras中使用函数API时,必须首先创建层对象,如
a = Add()
中所示,然后通过调用结果对象将层添加到计算图中,如下所示:out = a([input1, input2, ...])
或者在你最初的例子中:
self.convo = Add()([self.prev_layer, self.convo])
相关问题 更多 >
编程相关推荐