请将以下代码转换成PyTorch下的代码
时间: 2024-05-10 13:20:48 浏览: 94
以下是参考代码:
```python
import tensorflow as tf

# Model: 784 -> 32 (ReLU) -> Dropout(0.2) -> 10 raw logits.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10),
])

# Loss on raw logits (from_logits=True) with integer class labels.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

# Load the MNIST data.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Flatten 28x28 images to 784-vectors and scale pixels into [0, 1].
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

# Build shuffled/batched dataset pipelines.
batch_size = 64
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)

epochs = 10
for epoch in range(epochs):
    # Training: forward pass under GradientTape, then manual SGD update.
    for x_batch_train, y_batch_train in train_ds:
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)  # training=True keeps Dropout active
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

    # Evaluation: average the loss over all test batches and print one
    # summary line per epoch (the original printed one line per batch,
    # each showing only that batch's loss).
    total_loss = 0.0
    num_batches = 0
    for x_batch_test, y_batch_test in test_ds:
        logits = model(x_batch_test, training=False)  # Dropout disabled for eval
        total_loss += float(loss_fn(y_batch_test, logits))
        num_batches += 1
    print("Epoch {}, test loss: {}".format(epoch, total_loss / num_batches))
```
以下是转换后的PyTorch代码:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
# Define the model
class Model(nn.Module):
    """MLP for flattened 28x28 MNIST images.

    Architecture: 784 -> 32 (ReLU) -> Dropout(0.2) -> 10 raw logits,
    mirroring the tf.keras.Sequential model above.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(784, 32)
        self.dropout = nn.Dropout(0.2)
        self.layer2 = nn.Linear(32, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input of shape (batch, 784)."""
        hidden = torch.relu(self.layer1(x))
        return self.layer2(self.dropout(hidden))
model = Model()

# Cross-entropy on raw logits with integer class labels: PyTorch's
# CrossEntropyLoss applies log-softmax internally, matching TF's
# SparseCategoricalCrossentropy(from_logits=True).
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Load MNIST. NOTE(review): Normalize((0.1307,), (0.3081,)) standardizes the
# pixels, whereas the TF version only scales to [0, 1] — keep this in mind
# when comparing loss values between the two scripts.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
# Shuffling the test set is pointless for evaluation (the TF version also
# batches the test set without shuffling).
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

epochs = 10
for epoch in range(epochs):
    # Switch to training mode so Dropout is active. The original never
    # toggled modes, so Dropout stayed enabled during evaluation too,
    # corrupting the reported test loss.
    model.train()
    for x_batch_train, y_batch_train in train_loader:
        optimizer.zero_grad()
        logits = model(x_batch_train.view(-1, 784))  # flatten (B,1,28,28) -> (B,784)
        loss_value = loss_fn(logits, y_batch_train)
        loss_value.backward()
        optimizer.step()

    # Evaluation: disable Dropout and gradient tracking, and report the
    # mean loss over all test batches as one summary line per epoch
    # (instead of one print per batch).
    model.eval()
    total_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for x_batch_test, y_batch_test in test_loader:
            logits = model(x_batch_test.view(-1, 784))
            total_loss += loss_fn(logits, y_batch_test).item()
            num_batches += 1
    print("Epoch {}, test loss: {}".format(epoch, total_loss / num_batches))
```
阅读全文