Coding MLP, U-Net, and ResNet (2D or 3D)

MLP

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import torch.nn as nn

class FCModel(nn.Module):
    """Two-layer fully-connected classifier over sequence input.

    Both Linear layers act on each time step independently, and the model
    keeps only the last time step's logits — so the result is identical to
    running the two layers on ``x[:, -1, :]`` alone.

    NOTE(review): there is no nonlinearity between fc1 and fc2, so the two
    layers compose into a single linear map — confirm this is intentional.

    Args:
        input_size: number of features per time step.
        hidden_size: width of the hidden layer.
        num_classes: number of output classes.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(FCModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x: (batch, seq_len, input_size).
        # The original reshaped to (batch*seq, input) with .view (which
        # raises on non-contiguous input), ran both layers on every time
        # step, then discarded all but the last step. Since the layers act
        # per-step, computing only the last step gives identical output
        # while skipping seq_len - 1 wasted forward passes.
        last = x[:, -1, :]               # (batch, input_size)
        return self.fc2(self.fc1(last))  # (batch, num_classes)


# Hyperparameters: input_size is the flattened feature size per time step.
input_size = 13456
hidden_size = 64
num_classes = 2

# Instantiate the model.
model = FCModel(input_size, hidden_size, num_classes)

2D U-Net

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import torch
import torch.nn as nn
import torch.nn.functional as F

# UNet的一大层,包含了两层小的卷积
class DoubleConv(nn.Module):
    """(Conv2d 3x3 -> BatchNorm -> ReLU) applied twice: one U-Net "level"."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        layers = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)

# 定义输入进来的第一层
class InConv(nn.Module):
    """Input stage of the U-Net: a single DoubleConv, no downsampling."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x):
        return self.conv(x)

# 定义encoder中的向下传播,包括一个maxpool和一大层
class Down(nn.Module):
    """Encoder step: a 2x2 max-pool halves the resolution, then DoubleConv."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.mpconv = nn.Sequential(nn.MaxPool2d(2), DoubleConv(in_ch, out_ch))

    def forward(self, x):
        return self.mpconv(x)

# 定义decoder中的向上传播
class Up(nn.Module):
    """Decoder step: upsample, pad to the skip tensor's size, concat, DoubleConv.

    With bilinear=True a parameter-free bilinear upsample is used; otherwise a
    transposed convolution over in_ch // 2 channels (the decoder-path width).
    """

    def __init__(self, in_ch, out_ch, bilinear=True):
        super().__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        """x1: output of the previous decoder level; x2: encoder skip tensor."""
        x1 = self.up(x1)

        # Pad x1 so its H and W match x2 exactly (handles odd input sizes).
        dh = x2.size(2) - x1.size(2)
        dw = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, (dw // 2, dw - dw // 2, dh // 2, dh - dh // 2))

        # Channel-wise concatenation: skip features first, then decoder features.
        return self.conv(torch.cat([x2, x1], dim=1))

# 定义最终的输出
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to per-class scores."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.conv(x)

class Unet(nn.Module):
    """2D U-Net: 4-level encoder, 4-level decoder with skip connections.

    Args:
        in_channels: input image channels (1 = grayscale, 3 = RGB).
        classes: number of output classes (channels of the final 1x1 conv).
    """

    def __init__(self, in_channels, classes):
        super().__init__()
        self.n_channels = in_channels
        self.n_classes = classes

        self.inc = InConv(in_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        self.up1 = Up(1024, 256)
        self.up2 = Up(512, 128)
        self.up3 = Up(256, 64)
        self.up4 = Up(128, 64)
        self.outc = OutConv(64, classes)

    def forward(self, x):
        # Encoder: keep every level's output for the skip connections.
        skips = [self.inc(x)]
        for down in (self.down1, self.down2, self.down3, self.down4):
            skips.append(down(skips[-1]))
        x1, x2, x3, x4, x5 = skips

        # Decoder: each Up consumes the previous output plus one skip tensor.
        y = self.up1(x5, x4)
        y = self.up2(y, x3)
        y = self.up3(y, x2)
        y = self.up4(y, x1)
        return self.outc(y)

2D ResNet

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    """Two 3x3 convs with a residual shortcut — the ResNet-18/34 basic block."""

    expansion = 1  # output channels = out_channels * expansion

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        out_planes = out_channels * self.expansion

        # First conv may downsample (stride > 1).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

        # Second conv always keeps stride 1.
        self.conv2 = nn.Conv2d(out_channels, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

        # Projection shortcut (1x1 conv + BN) only when the shape changes;
        # otherwise an empty Sequential acts as the identity.
        if stride != 1 or in_channels != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return self.relu(out)


class ResNet(nn.Module):
    """Generic 2D ResNet with a CIFAR-style stem (3x3 conv, no initial pool).

    Args:
        block: residual block class exposing an `expansion` class attribute
            and accepting (in_channels, out_channels, stride).
        num_blocks: four ints — number of blocks per stage.
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.in_channels = 64

        # Stem: 3-channel RGB input -> 64 feature maps, resolution unchanged.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)

        # Four stages; stages 2-4 halve resolution via a stride-2 first block.
        self.layer1 = self.make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self.make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self.make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self.make_layer(block, 512, num_blocks[3], stride=2)

        # Global average pooling + linear classifier head.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def make_layer(self, block, out_channels, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avg_pool(out)
        return self.fc(torch.flatten(out, 1))

def ResNet18(num_classes=10):
    """Build a 2D ResNet-18: BasicBlock, two blocks in each of four stages."""
    blocks_per_stage = [2, 2, 2, 2]
    return ResNet(BasicBlock, blocks_per_stage, num_classes)

3D ResNet

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
class BasicBlock(nn.Module):
    """3D variant of the ResNet basic block: two 3x3x3 convs + residual shortcut."""

    expansion = 1  # output channels = out_channels * expansion

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        out_planes = out_channels * self.expansion

        # First conv may downsample (stride > 1).
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)

        # Second conv always keeps stride 1.
        self.conv2 = nn.Conv3d(out_channels, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm3d(out_planes)

        # Projection shortcut (1x1x1 conv + BN) only when the shape changes;
        # otherwise an empty Sequential acts as the identity.
        if stride != 1 or in_channels != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv3d(in_channels, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm3d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return self.relu(out)


class ResNet(nn.Module):
    """Generic 3D ResNet for single-channel volumes (3x3x3 stem, no initial pool).

    Args:
        block: residual block class exposing an `expansion` class attribute
            and accepting (in_channels, out_channels, stride).
        num_blocks: four ints — number of blocks per stage.
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.in_channels = 64

        # Stem: 1-channel volumetric input -> 64 feature maps, size unchanged.
        self.conv1 = nn.Conv3d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)

        # Four stages; stages 2-4 halve resolution via a stride-2 first block.
        self.layer1 = self.make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self.make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self.make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self.make_layer(block, 512, num_blocks[3], stride=2)

        # Global average pooling + linear classifier head.
        self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def make_layer(self, block, out_channels, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avg_pool(out)
        return self.fc(torch.flatten(out, 1))

def ResNet18_3D(num_classes=10):
    """Build a 3D ResNet-18: BasicBlock, two blocks in each of four stages."""
    blocks_per_stage = [2, 2, 2, 2]
    return ResNet(BasicBlock, blocks_per_stage, num_classes)