PyTorch Bayesian Optimization Code
Below is an example of using PyTorch together with the `bayes_opt` library (installable via `pip install bayesian-optimization`) to optimize a model's hyperparameters:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.sampler import RandomSampler
from bayes_opt import BayesianOptimization
class MyDataset(Dataset):
    """Simple in-memory dataset wrapping feature and target tensors."""
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return len(self.x)

class MyModel(nn.Module):
    """Two-layer feed-forward network with a ReLU activation."""
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = self.fc1(x)
        x = nn.functional.relu(x)
        x = self.fc2(x)
        return x
def train(model, train_loader, optimizer, criterion):
    """Run one training epoch and return the mean batch loss."""
    model.train()
    total_loss = 0.0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(train_loader)

def evaluate(model, val_loader, criterion):
    """Compute the mean validation loss without updating weights."""
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for inputs, targets in val_loader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            total_loss += loss.item()
    return total_loss / len(val_loader)
def optimize(hidden_dim, learning_rate, weight_decay):
    # Load data (random tensors stand in for a real dataset)
    x_train = torch.randn(1000, 10)
    y_train = torch.randn(1000, 1)
    dataset = MyDataset(x_train, y_train)
    train_loader = DataLoader(dataset, batch_size=32, sampler=RandomSampler(dataset))
    x_val = torch.randn(100, 10)
    y_val = torch.randn(100, 1)
    val_dataset = MyDataset(x_val, y_val)
    val_loader = DataLoader(val_dataset, batch_size=32)
    # Define model (hidden_dim is passed in as a float, so cast it to int)
    input_dim = 10
    output_dim = 1
    model = MyModel(input_dim, int(hidden_dim), output_dim)
    # Define optimizer and criterion
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    criterion = nn.MSELoss()
    # Train and evaluate model
    num_epochs = 10
    for epoch in range(num_epochs):
        train_loss = train(model, train_loader, optimizer, criterion)
        val_loss = evaluate(model, val_loader, criterion)
        print('Epoch %d: Train loss=%.4f Val loss=%.4f' % (epoch, train_loss, val_loss))
    # bayes_opt maximizes the objective, so negate the loss to minimize it
    return -val_loss
# Define parameter bounds for optimization
pbounds = {'hidden_dim': (10, 100), 'learning_rate': (1e-4, 1e-2), 'weight_decay': (1e-6, 1e-4)}
# Create optimizer object and run optimization
optimizer = BayesianOptimization(
    f=optimize,
    pbounds=pbounds,
    random_state=42,
)
optimizer.maximize(init_points=2, n_iter=10)
# Print best hyperparameters
print(optimizer.max)
```
This example defines a simple two-layer network and trains and validates it with an MSE loss. The `optimize` function takes a set of hyperparameter values and returns the negated validation loss: `BayesianOptimization` always maximizes its objective, so negating the loss turns the maximization into loss minimization. We then define bounds for each hyperparameter and run the search through a `BayesianOptimization` object, here with 2 random initial points followed by 10 Bayesian iterations, and finally print the best hyperparameter combination and its corresponding (negated) validation loss.
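Once the search finishes, `optimizer.max` holds the best result as a dict with a `target` key (the best objective value) and a `params` key (the hyperparameters that produced it). Below is a minimal sketch of pulling those values out and rebuilding a final model from them; the retraining step is a hypothetical follow-up, not part of the original example, and it assumes the same `MyModel` and `pbounds` keys as above:

```python
# Extract the best hyperparameters found by the search
best = optimizer.max
best_params = best['params']   # {'hidden_dim': ..., 'learning_rate': ..., 'weight_decay': ...}
best_score = best['target']    # negated validation loss, so flip the sign back
print('Best validation loss: %.4f' % -best_score)

# Hypothetical follow-up: rebuild a final model with the best values
final_model = MyModel(10, int(best_params['hidden_dim']), 1)
final_optimizer = optim.Adam(
    final_model.parameters(),
    lr=best_params['learning_rate'],
    weight_decay=best_params['weight_decay'],
)
```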