pytorch实现一维深度卷积神经网络抑郁识别代码
时间: 2023-10-15 19:07:24 浏览: 168
以下是一个简单的 PyTorch 实现一维深度卷积神经网络抑郁识别的代码:
```python
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
class DepressionDataset(Dataset):
    """Wrap feature/label arrays as a PyTorch ``Dataset``.

    Features are stored as float32 (ready for ``Conv1d``) and labels as
    int64 (ready for ``CrossEntropyLoss``).
    """

    def __init__(self, X, y):
        # Convert once up front so __getitem__ is a cheap tensor index.
        self.X = torch.tensor(X, dtype=torch.float32)
        self.y = torch.tensor(y, dtype=torch.long)

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]
class DepressionNet(nn.Module):
    """1-D CNN for binary (depressed / not depressed) classification.

    Architecture: two Conv1d+ReLU+MaxPool stages, dropout, then two
    fully-connected layers producing 2-class logits (feed straight into
    ``CrossEntropyLoss``).

    Args:
        input_length: length L of the 1-D input signal, i.e. inputs are
            shaped (batch, 1, L). The original code hard-coded the
            flattened size ``64 * 23``, which is only valid for L=100;
            it is now derived from ``input_length`` (default 100 keeps
            the original behavior).
    """

    def __init__(self, input_length=100):
        super().__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3)
        self.conv2 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3)
        self.pool = nn.MaxPool1d(kernel_size=2)
        self.dropout = nn.Dropout(p=0.5)
        # Mirror the conv/pool arithmetic to size fc1: each stage loses 2
        # samples to the kernel_size=3 conv, then halves (floor) in the pool.
        # For input_length=100: ((100-2)//2 - 2)//2 = 23 -> 64*23 = 1472.
        flat_features = ((input_length - 2) // 2 - 2) // 2 * 64
        self.fc1 = nn.Linear(in_features=flat_features, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=2)

    def forward(self, x):
        """Map (batch, 1, input_length) signals to (batch, 2) logits."""
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        x = self.dropout(x)
        x = torch.flatten(x, start_dim=1)
        x = self.dropout(nn.functional.relu(self.fc1(x)))
        return self.fc2(x)
def train(model, dataloader, criterion, optimizer, device):
    """Run one training epoch.

    Args:
        model: the network to optimize (switched to ``train()`` mode).
        dataloader: yields ``(inputs, labels)`` mini-batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer holding ``model``'s parameters.
        device: device to move each batch to.

    Returns:
        Mean per-sample loss over the epoch (float). The original
        returned nothing, leaving callers blind to training progress;
        returning the average loss is backward compatible.
    """
    model.train()
    running_loss = 0.0
    for inputs, labels in dataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the final mean is per-sample, not per-batch.
        running_loss += loss.item() * inputs.size(0)
    return running_loss / max(len(dataloader.dataset), 1)
def evaluate(model, dataloader, criterion, device):
    """Compute mean loss and accuracy of ``model`` over ``dataloader``.

    Gradients are disabled and the model is put in ``eval()`` mode
    (deterministic dropout).

    Returns:
        ``(loss, acc)`` — dataset-average loss (float) and accuracy
        (0-1 tensor).
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            batch_loss = criterion(outputs, labels)
            # Weight by batch size so the mean is per-sample.
            total_loss += batch_loss.item() * inputs.size(0)
            correct += torch.sum(outputs.argmax(dim=1) == labels.data)
    n_samples = len(dataloader.dataset)
    return total_loss / n_samples, correct.double() / n_samples
if __name__ == '__main__':
    # The original `X_train, y_train = ...` unpacked Ellipsis and crashed.
    # Synthetic placeholder data keeps the script runnable end-to-end;
    # replace with real feature/label loading. Conv1d expects inputs
    # shaped (batch, channels=1, length=100).
    rng = np.random.default_rng(42)
    X_train = rng.standard_normal((200, 1, 100)).astype(np.float32)
    y_train = rng.integers(0, 2, size=200)
    X_val = rng.standard_normal((50, 1, 100)).astype(np.float32)
    y_val = rng.integers(0, 2, size=50)

    train_dataset = DepressionDataset(X_train, y_train)
    val_dataset = DepressionDataset(X_val, y_val)
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = DepressionNet().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    best_acc = 0.0
    for epoch in range(20):
        train(model, train_loader, criterion, optimizer, device)
        val_loss, val_acc = evaluate(model, val_loader, criterion, device)
        print(f'Epoch {epoch + 1}: val_loss={val_loss:.4f}, val_acc={val_acc:.4f}')
        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), 'best_model.pth')
```
这段代码中,我们首先定义了一个 `DepressionDataset` 类来加载数据,然后定义了一个 `DepressionNet` 类来构建模型。在 `DepressionNet` 类中,我们使用了一维卷积层、池化层、全连接层和 Dropout 层来构建神经网络。在训练过程中,我们使用了 Adam 优化器和交叉熵损失函数。最后,我们进行了 20 个 epoch 的训练,并保存了最好的模型。
阅读全文