
Differences between BasicBlock_104 and BasicBlock_18

BasicBlock_104:

class BasicBlock_104(nn.Module):
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()

        # main branch of the residual function
        self.residual_function = nn.Sequential(
            # time-driven spiking neuron update
            mem_update(),

            # spiking convolution layer
            Snn_Conv2d(in_channels,
                       out_channels,
                       kernel_size=3,
                       stride=stride,
                       padding=1,
                       bias=False),
            batch_norm_2d(out_channels),

            # time-driven spiking neuron update
            mem_update(),

            # spiking convolution layer
            Snn_Conv2d(out_channels,
                       out_channels * BasicBlock_104.expansion,
                       kernel_size=3,
                       padding=1,
                       bias=False),
            batch_norm_2d1(out_channels * BasicBlock_104.expansion),
        )
        self.shortcut = nn.Sequential()

        if stride != 1 or in_channels != BasicBlock_104.expansion * out_channels:
            # when stride != 1 or the number of input channels differs
            # from the number of output channels, downsample the shortcut
            self.shortcut = nn.Sequential(
                # average pooling over the spatial dimensions only
                nn.AvgPool3d((1, 2, 2), stride=(1, 2, 2)),

                # SNN convolution layer (1x1, stride 1)
                Snn_Conv2d(in_channels,
                           out_channels * BasicBlock_104.expansion,
                           kernel_size=1,
                           stride=1,
                           bias=False),

                # batch normalization
                batch_norm_2d(out_channels * BasicBlock_104.expansion),
            )

    def forward(self, x):
        return (self.residual_function(x) + self.shortcut(x))

BasicBlock_18:

class BasicBlock_18(nn.Module):
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.residual_function = nn.Sequential(
            mem_update(),

            Snn_Conv2d(in_channels,
                       out_channels,
                       kernel_size=3,
                       stride=stride,
                       padding=1,
                       bias=False),

            batch_norm_2d(out_channels),

            mem_update(),

            Snn_Conv2d(out_channels,
                       out_channels * BasicBlock_18.expansion,
                       kernel_size=3,
                       padding=1,
                       bias=False),

            batch_norm_2d1(out_channels * BasicBlock_18.expansion),
        )
        self.shortcut = nn.Sequential()

        if stride != 1 or in_channels != BasicBlock_18.expansion * out_channels:
            self.shortcut = nn.Sequential(
                # strided 1x1 spiking convolution does the downsampling directly
                Snn_Conv2d(in_channels,
                           out_channels * BasicBlock_18.expansion,
                           kernel_size=1,
                           stride=stride,
                           bias=False),
                batch_norm_2d(out_channels * BasicBlock_18.expansion),
            )

    def forward(self, x):
        return (self.residual_function(x) + self.shortcut(x))
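
The structural difference between the two blocks lies in the shortcut branch: BasicBlock_18 downsamples with a strided 1x1 Snn_Conv2d, whereas BasicBlock_104 first halves the spatial resolution with nn.AvgPool3d((1, 2, 2)) and only then applies a stride-1 1x1 convolution. Assuming the feature maps carry a leading time dimension, i.e. they are shaped (time_window, batch, channels, H, W) as the forward passes of the networks below suggest, the (1, 2, 2) pooling kernel leaves channels untouched and halves only H and W. A minimal shape-check sketch (tensor sizes are illustrative, and a plain nn.Conv2d stands in for Snn_Conv2d):

import torch
import torch.nn as nn

x = torch.randn(8, 2, 64, 56, 56)           # (T, N, C, H, W), illustrative sizes

# BasicBlock_104-style shortcut: pool spatially first, then a stride-1 1x1 conv.
pool = nn.AvgPool3d((1, 2, 2), stride=(1, 2, 2))
print(pool(x).shape)                         # torch.Size([8, 2, 64, 28, 28])

# BasicBlock_18-style shortcut: let the 1x1 convolution do the downsampling,
# applied per time step here since nn.Conv2d expects 4D inputs.
conv = nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False)
y = torch.stack([conv(x[t]) for t in range(x.size(0))])
print(y.shape)                               # torch.Size([8, 2, 128, 28, 28])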

ResNet_104 vs. ResNet_origin_18

ResNet_104:


class ResNet_104(nn.Module):
    # Channel:
    def __init__(self, block, num_block, num_classes=1000):
        # num_block: number of residual blocks in each stage
        super().__init__()
        k = 1
        self.in_channels = 64 * k
        self.conv1 = nn.Sequential(
            Snn_Conv2d(3, 64 * k, kernel_size=3, padding=1, stride=2),
            Snn_Conv2d(64 * k, 64 * k, kernel_size=3, padding=1, stride=1),
            Snn_Conv2d(64 * k, 64 * k, kernel_size=3, padding=1, stride=1),
            batch_norm_2d(64 * k),
        )
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.mem_update = mem_update()
        self.conv2_x = self._make_layer(block, 64 * k, num_block[0], 2)
        self.conv3_x = self._make_layer(block, 128 * k, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 256 * k, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 512 * k, num_block[3], 2)
        self.fc = nn.Linear(512 * block.expansion * k, num_classes)
        self.dropout = nn.Dropout(p=0.2)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Build one stage (group of residual blocks) of the ResNet."""

        strides = [stride] + [1] * (num_blocks - 1)  # first block downsamples, the rest use stride 1
        layers = []

        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))  # append each residual block
            self.in_channels = out_channels * block.expansion

        return nn.Sequential(*layers)

    def forward(self, x):
        # replicate the static input image over the time window: (T, N, 3, H, W)
        input = torch.zeros(time_window,
                            x.size()[0],
                            3,
                            x.size()[2],
                            x.size()[3],
                            device=device)
        for i in range(time_window):
            input[i] = x
        output = self.conv1(input)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)
        output = self.mem_update(output)
        output = F.adaptive_avg_pool3d(output, (None, 1, 1))
        output = output.view(output.size()[0], output.size()[1], -1)
        output = output.sum(dim=0) / output.size()[0]  # average over the time dimension
        output = self.dropout(output)
        output = self.fc(output)
        return output
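
Compared with ResNet_origin_18 below, ResNet_104 replaces the single 7x7, stride-2 stem convolution with three stacked 3x3 spiking convolutions (only the first with stride 2), and adds dropout before the classifier. In both networks the forward pass feeds a static image into the spiking backbone by copying it to every step of the time window. A minimal sketch of that replication step (time_window = 4 and the tensor sizes are illustrative assumptions):

import torch

time_window = 4
x = torch.randn(2, 3, 224, 224)              # (N, 3, H, W) image batch

# Same effect as the explicit loop in forward(): copy the image to every
# time step, giving a (T, N, 3, H, W) tensor.
inp = x.unsqueeze(0).repeat(time_window, 1, 1, 1, 1)
print(inp.shape)                             # torch.Size([4, 2, 3, 224, 224])
print(torch.equal(inp[0], x))                # True: each step holds the same image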


ResNet_origin_18:


class ResNet_origin_18(nn.Module):

    def __init__(self, block, num_block, num_classes=1000):
        super().__init__()
        k = 1
        self.in_channels = 64 * k
        self.conv1 = nn.Sequential(
            # stem: a single 7x7, stride-2 spiking convolution
            Snn_Conv2d(3,
                       64 * k,
                       kernel_size=7,
                       padding=3,
                       bias=False,
                       stride=2),
            batch_norm_2d(64 * k),
        )
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.mem_update = mem_update()
        self.conv2_x = self._make_layer(block, 64 * k, num_block[0], 2)
        self.conv3_x = self._make_layer(block, 128 * k, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 256 * k, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 512 * k, num_block[3], 2)
        self.fc = nn.Linear(512 * block.expansion * k, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion

        return nn.Sequential(*layers)

    def forward(self, x):
        # replicate the static input image over the time window: (T, N, 3, H, W)
        input = torch.zeros(time_window,
                            x.size()[0],
                            3,
                            x.size()[2],
                            x.size()[3],
                            device=device)
        for i in range(time_window):
            input[i] = x
        output = self.conv1(input)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)
        output = self.mem_update(output)
        output = F.adaptive_avg_pool3d(output, (None, 1, 1))
        output = output.view(output.size()[0], output.size()[1], -1)
        output = output.sum(dim=0) / output.size()[0]  # average over the time dimension
        output = self.fc(output)
        return output
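
Both networks end the same way: after global spatial pooling and the view to a (T, N, C) tensor, the activations are collapsed by output.sum(dim=0) / output.size()[0], i.e. a mean over the time dimension, before the final linear classifier. A small check of that equivalence (tensor sizes are illustrative):

import torch

out = torch.randn(4, 2, 512)                           # (T, N, C) after pooling and view
readout = out.sum(dim=0) / out.size()[0]               # as written in forward()
print(torch.allclose(readout, out.mean(dim=0)))        # True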