import torch
import torch.nn as nn
import torch.optim as optim

# Training data: two explanatory variables (x1, x2) and one response y.
x1 = torch.tensor([0, 2.8, 5.6, 8.4, 11.2, 14, 16.8], dtype=torch.float32)
x2 = torch.tensor([0, 20, 387, 680, 783, 850, 1120], dtype=torch.float32)
y = torch.tensor([0, 87.94, 2447.78, 5253, 6625.75, 7868.45, 12012], dtype=torch.float32)


class LinearRegression(nn.Module):
    """Plain linear regression: 2 input features -> 1 output."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(2, 1)

    def forward(self, x):
        return self.linear(x)


model = LinearRegression()

# MSE loss with vanilla SGD.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# BUG FIX: with the raw feature scales (x2 up to 1120, y up to ~12000) the
# gradients are on the order of 1e7, so SGD at lr=0.01 diverges to inf/NaN
# within a few steps.  Standardize the features once and reuse the same
# statistics for every prediction input.
inputs_raw = torch.stack([x1, x2], dim=1)  # shape (7, 2)
feat_mean = inputs_raw.mean(dim=0)
feat_std = inputs_raw.std(dim=0)
inputs = (inputs_raw - feat_mean) / feat_std

# Train the model (inputs are constant, so stacking is hoisted out of the loop).
num_epochs = 1000
for epoch in range(num_epochs):
    outputs = model(inputs)
    loss = criterion(outputs.squeeze(), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Print learned parameters (weights are in standardized-feature space).
print("模型参数:")
for name, param in model.named_parameters():
    if param.requires_grad:
        print(name, param.data)

# Prediction inputs must be normalized with the *training* statistics.
test_input = (torch.tensor([[6, 12]], dtype=torch.float32) - feat_mean) / feat_std
predicted = model(test_input)
print("预测结果:", predicted.item())
时间: 2024-04-17 14:22:54 浏览: 21
你的代码看起来很不错!你根据自己的数据设置了x1、x2和y的值,并定义了一个继承自nn.Module的线性回归模型。你还使用了均方误差损失函数和随机梯度下降优化器进行训练,并打印了训练过程中的损失值。最后,你使用训练好的模型进行了预测并打印了预测结果。
这个代码示例中使用的数据和模型都是符合线性回归的特征,所以应该能得到较好的结果。如果你有任何其他问题或者需要进一步的帮助,请随时告诉我!
相关问题
import torch import torch.nn.functional as F import torch.optim as optim from torchvision import transforms from torchvision import datasets from torch.utils.data import DataLoader 补全代码
下面是一个基本的 PyTorch MNIST 数据集训练代码示例,包括数据加载、模型定义、优化器和损失函数的定义以及训练过程。
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# Preprocessing: convert images to tensors, then standardize with the
# MNIST channel mean/std.
_MNIST_MEAN, _MNIST_STD = (0.1307,), (0.3081,)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(_MNIST_MEAN, _MNIST_STD),
])

# Datasets are downloaded into ./data on first use.
train_dataset = datasets.MNIST('data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('data', train=False, download=True, transform=transform)

# Shuffle only the training stream; evaluation order is fixed.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
# Small two-conv CNN for 10-class MNIST classification.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 10 -> 20 channels, 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        # Classifier head on the flattened 20 x 4 x 4 feature map (= 320).
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Two conv -> pool -> ReLU stages shrink 28x28 inputs to 4x4 maps.
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = torch.flatten(x, 1)  # equivalent to x.view(-1, 320) for NCHW input
        x = F.relu(self.fc1(x))
        # Log-probabilities over the 10 digit classes.
        return F.log_softmax(self.fc2(x), dim=1)


net = Net()
# Define the optimizer and loss function.
# BUG FIX: the network's forward() already returns log-probabilities
# (F.log_softmax), but nn.CrossEntropyLoss applies log_softmax internally,
# so the original code applied it twice and silently distorted the loss.
# NLLLoss(log_softmax(x)) is exactly what CrossEntropyLoss(x) computes on
# raw logits, so pairing NLLLoss with this model is the correct choice.
criterion = nn.NLLLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.5)
# Model training.
def train(epoch):
    """Run one training epoch over train_loader, logging every 10 batches."""
    net.train()
    for step, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        logits = net(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()
        if step % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(images), len(train_loader.dataset),
                100. * step / len(train_loader), batch_loss.item()))
def test():
    """Evaluate on the test set and print the average per-sample loss and accuracy."""
    net.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = net(data)
            # BUG FIX: criterion returns the *mean* loss of the batch; the
            # original summed these means and divided by the dataset size,
            # underreporting the loss by a factor of ~batch_size.  Weight
            # each batch mean by its batch size (the last batch may be
            # smaller) so the final division yields a true per-sample
            # average.  .item() also avoids accumulating tensors.
            test_loss += criterion(output, target).item() * data.size(0)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Train for four epochs, evaluating on the test set after each one.
for epoch_num in range(1, 5):
    train(epoch_num)
    test()
```
这是一个简单的卷积神经网络模型,用于对 MNIST 手写数字进行分类。训练过程中使用了交叉熵损失函数和随机梯度下降优化器。
import torch.optim as optim
from typing import List,Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Fully connected classifier: ReLU hidden layers, each followed by dropout."""

    def __init__(self, input_size: int, hidden_size: List[int], output_size: int, dropout: float):
        super(Net, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout = dropout
        # Chain the layer widths: input -> hidden[0] -> ... -> hidden[-1].
        widths = [input_size] + list(hidden_size)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(fan_in, fan_out) for fan_in, fan_out in zip(widths[:-1], widths[1:])]
        )
        # Final projection to class scores.
        self.output_layer = nn.Linear(hidden_size[-1], output_size)
        # One dropout module reused after every hidden activation.
        self.dropout_layer = nn.Dropout(p=dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw (unnormalized) class scores for a batch of inputs."""
        for hidden in self.hidden_layers:
            x = self.dropout_layer(F.relu(hidden(x)))
        return self.output_layer(x)
def train_model(model: Net, train_data: Tuple[torch.Tensor, torch.Tensor],
                test_data: Tuple[torch.Tensor, torch.Tensor],
                batch_size: int, num_epochs: int, learning_rate: float):
    """Train `model` with Adam on shuffled mini-batches of `train_data`.

    After each epoch, evaluates on `test_data` and prints the last training
    batch loss, test loss, and test accuracy.  Both data tuples are
    (inputs, integer class labels).
    """
    train_inputs, train_labels = train_data
    test_inputs, test_labels = test_data
    # Cross-entropy over raw class scores; Adam with the requested LR.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    for epoch in range(num_epochs):
        # BUG FIX: switch back to train mode each epoch so dropout is active
        # during training (evaluation below puts the model in eval mode).
        model.train()
        # Reshuffle the training data every epoch.
        perm = torch.randperm(train_inputs.size(0))
        train_inputs = train_inputs[perm]
        train_labels = train_labels[perm]
        # Mini-batch SGD loop.
        for i in range(0, train_inputs.size(0), batch_size):
            inputs = train_inputs[i:i + batch_size]
            labels = train_labels[i:i + batch_size]
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
        # BUG FIX: the original evaluated in train mode (dropout active,
        # biasing the reported test metrics) and without no_grad (building
        # an unused autograd graph over the whole test set every epoch).
        model.eval()
        with torch.no_grad():
            test_outputs = model(test_inputs)
            test_loss = criterion(test_outputs, test_labels)
            test_accuracy = accuracy(test_outputs, test_labels)
        print(f"Epoch {epoch+1}/{num_epochs}: Train loss={loss:.4f}, Test loss={test_loss:.4f}, Test accuracy={test_accuracy:.4f}")
def accuracy(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    """Fraction of rows whose argmax class matches the label."""
    hits = (outputs.argmax(dim=1) == labels).sum()
    return hits.float().div(labels.size(0)).item()