Coding-神经网络训练框架

摘要

神经网络训练框架

（注：此处原为代码清单第 1–50 行，提取时仅残留行号，对应代码内容已丢失——缺失部分应包含 import 语句、数据加载（train_loader / test_loader）以及模型 model 的定义。）
# Hyperparameters for training.
# input_size = 1625 # mini_spectrum — smaller alternative input length, kept for reference
input_size = 2048 # good_spectrum: length of one input spectrum vector — TODO confirm against the dataset
hidden_size = 128  # width of the hidden layer
num_classes = 5  # number of target classes
learning_rate = 0.001  # Adam step size
num_epochs = 300  # full passes over the training set
batch_size = 32  # NOTE(review): the DataLoader is created elsewhere — verify it actually uses this value

# Loss function and optimizer. `model`, `nn` (torch.nn) and `optim`
# (torch.optim) are defined/imported earlier in the file (not visible here).
criterion = nn.CrossEntropyLoss()  # expects raw logits and integer class labels
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Train the model: one full pass over train_loader per epoch, reporting the
# mean mini-batch loss. (Indentation was lost in the original paste — the
# nesting below restores the only structure that makes this valid Python.)
for epoch in range(num_epochs):
    model.train()  # enable training-mode behavior (dropout, batch-norm updates)
    running_loss = 0.0
    for inputs, labels in train_loader:
        optimizer.zero_grad()              # clear gradients from the previous step
        outputs = model(inputs)            # forward pass
        loss = criterion(outputs, labels)
        loss.backward()                    # back-propagate
        optimizer.step()                   # apply the parameter update
        running_loss += loss.item()        # accumulate scalar loss for reporting

    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(train_loader):.4f}')

# Evaluate the model: overall accuracy plus per-class correct/total tallies.
model.eval()  # switch dropout/batch-norm layers to inference behavior
with torch.no_grad():  # disable autograd bookkeeping during evaluation
    correct = 0
    total = 0
    class_correct = [0.0] * num_classes  # correct predictions per class
    class_total = [0.0] * num_classes    # samples seen per class

    for inputs, labels in test_loader:
        outputs = model(inputs)
        # Predicted class = index of the max logit along the class dimension.
        # (.data dropped — redundant inside no_grad.)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

        # Per-class accuracy bookkeeping. BUG FIX: the original applied
        # .squeeze() here, which collapses a final batch of size 1 to a
        # 0-d tensor and makes c[i] fail; the comparison is already 1-D,
        # so squeeze is unnecessary and has been removed.
        c = (predicted == labels)
        for i in range(len(labels)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

print(f'Accuracy of the model on the test data: {100 * correct / total:.2f}%')
# Per-class sample counts are in class_total; per-class accuracy is
# class_correct[k] / class_total[k] (guard against class_total[k] == 0).