pytorch训练两个模型,6个数据集的代码实现
时间: 2024-02-17 07:00:02 浏览: 77
以下是一个简单的示例代码,演示如何使用PyTorch框架训练两个模型,对6个数据集进行训练:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# 定义模型1
class Model1(nn.Module):
    """Two-layer MLP classifier: 10-dim input -> 20 hidden (ReLU) -> 5 logits."""

    def __init__(self):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(10, 20)
        self.fc2 = nn.Linear(20, 5)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Single hidden layer with ReLU nonlinearity; raw logits out.
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
# 定义模型2
class Model2(nn.Module):
    """Two-layer MLP classifier: 10-dim input -> 15 hidden (ReLU) -> 3 logits."""

    def __init__(self):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(10, 15)
        self.fc2 = nn.Linear(15, 3)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Single hidden layer with ReLU nonlinearity; raw logits out.
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
# 定义数据集类
class MyDataset(Dataset):
    """Wraps a sequence of (features, label) pairs as a torch Dataset.

    Items are returned as (float32 feature tensor, int64 label tensor),
    matching what CrossEntropyLoss expects.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        features, label = self.data[index]
        return (torch.tensor(features, dtype=torch.float32),
                torch.tensor(label, dtype=torch.long))
# Build 6 synthetic datasets of 100 samples each:
# 10-dim Gaussian features with binary labels drawn from {0, 1}.
data_sets = []
for _ in range(6):
    features = torch.randn(100, 10)
    labels = torch.randint(0, 2, (100,))
    data_sets.append(MyDataset(list(zip(features, labels))))
# Split every dataset 80/20 into train and test subsets (random split).
train_sets, test_sets = [], []
for ds in data_sets:
    n_train = int(len(ds) * 0.8)
    n_test = len(ds) - n_train
    train_part, test_part = torch.utils.data.random_split(ds, [n_train, n_test])
    train_sets.append(train_part)
    test_sets.append(test_part)
# Hyper-parameters shared by both models.
lr = 0.001
epochs = 50
criterion = nn.CrossEntropyLoss()

def _train_one_epoch(model, optimizer, loader):
    """Run one optimization pass of `model` over `loader` using `criterion`."""
    model.train()
    for x, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()

def _evaluate(model, loader):
    """Return classification accuracy (%) of `model` over `loader`."""
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for x, y in loader:
            output = model(x)
            _, predicted = torch.max(output.data, 1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    return 100 * correct / total

# Train a fresh Model1 and Model2 on each of the 6 datasets.
for i in range(6):
    train_loader = DataLoader(train_sets[i], batch_size=32, shuffle=True)
    test_loader = DataLoader(test_sets[i], batch_size=32)
    model1 = Model1()
    model2 = Model2()
    # BUG FIX: the original built the optimizers once, at module level, from
    # `Model1.parameters()` / `Model2.parameters()` — calling `.parameters()`
    # on the *class* raises TypeError (missing `self`), and even a working
    # global optimizer would never see the fresh instances created here.
    # Optimizers must wrap the parameters of the concrete instance being
    # trained, so they are created per dataset, after instantiation.
    optimizer1 = optim.Adam(model1.parameters(), lr=lr)
    optimizer2 = optim.Adam(model2.parameters(), lr=lr)
    for epoch in range(epochs):
        _train_one_epoch(model1, optimizer1, train_loader)
        accuracy1 = _evaluate(model1, test_loader)
        _train_one_epoch(model2, optimizer2, train_loader)
        accuracy2 = _evaluate(model2, test_loader)
        # Report per-epoch test accuracy for both models.
        print("Dataset {}: Epoch [{}/{}], Accuracy1: {:.2f}%, Accuracy2: {:.2f}%"
              .format(i+1, epoch+1, epochs, accuracy1, accuracy2))
    # Persist the trained weights: one checkpoint per model per dataset.
    torch.save(model1.state_dict(), "model1_{}.ckpt".format(i+1))
    torch.save(model2.state_dict(), "model2_{}.ckpt".format(i+1))
```
以上代码假设每个数据集的输入是10维向量;示例数据的标签由 `torch.randint(0, 2, ...)` 生成,因此实际上只有 0/1 两类(Model1 和 Model2 的输出层分别为 5 维和 3 维,交叉熵损失对此仍然适用,但多余的类别在此示例数据上不会出现)。代码中使用了Adam优化器和交叉熵损失函数,每个模型的训练过程包括多个epoch,训练过程中打印训练进度和模型性能,并保存训练好的模型。
阅读全文