Defining an `optimize` optimizer with the NumPy and pandas libraries, plus code to plot the loss curve
Below is an example `optimize` implementation based on the NumPy and pandas libraries, together with the code that plots the loss curve:
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


class Optimizer:
    """Base class: subclasses implement in-place parameter updates."""
    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, gradients):
        raise NotImplementedError


class SGD(Optimizer):
    def __init__(self, lr=0.01):
        super().__init__(lr)

    def update(self, params, gradients):
        for param, grad in zip(params, gradients):
            param -= self.lr * grad  # in-place: mutates the caller's array


class Momentum(Optimizer):
    def __init__(self, lr=0.01, momentum=0.9):
        super().__init__(lr)
        self.momentum = momentum
        self.velocities = None

    def update(self, params, gradients):
        if self.velocities is None:  # lazy init: one velocity per parameter
            self.velocities = [np.zeros_like(param) for param in params]
        for i, (param, grad) in enumerate(zip(params, gradients)):
            self.velocities[i] = self.momentum * self.velocities[i] - self.lr * grad
            param += self.velocities[i]


class Adam(Optimizer):
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
        super().__init__(lr)
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        self.m = None  # first-moment (mean) estimates
        self.v = None  # second-moment (uncentered variance) estimates
        self.t = 0     # time step, used for bias correction

    def update(self, params, gradients):
        if self.m is None:
            self.m = [np.zeros_like(param) for param in params]
            self.v = [np.zeros_like(param) for param in params]
        self.t += 1
        # Fold both bias corrections into an effective learning rate
        lr_t = self.lr * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
        for i, (param, grad) in enumerate(zip(params, gradients)):
            self.m[i] = self.beta1 * self.m[i] + (1 - self.beta1) * grad
            self.v[i] = self.beta2 * self.v[i] + (1 - self.beta2) * grad ** 2
            param -= lr_t * self.m[i] / (np.sqrt(self.v[i]) + self.eps)


def loss_function(x, y, w, b):
    """Mean squared error of the linear model y_pred = x @ w + b."""
    y_pred = np.dot(x, w) + b
    return np.mean((y - y_pred) ** 2)


def optimize(x, y, optimizer, epochs=100, batch_size=32, verbose=True):
    n_samples, n_features = x.shape
    n_batches = int(np.ceil(n_samples / batch_size))
    w = np.zeros(n_features)
    # Use a 1-element array (not a Python scalar) so the optimizer's
    # in-place updates to b are visible here.
    b = np.zeros(1)
    losses = []
    for epoch in range(epochs):
        for i in range(n_batches):
            start = i * batch_size
            end = start + batch_size
            xb, yb = x[start:end], y[start:end]
            y_pred = np.dot(xb, w) + b
            # Scale by the actual batch length: the last batch may be smaller.
            grad_w = (-2 / len(xb)) * np.dot(xb.T, yb - y_pred)
            grad_b = (-2 / len(xb)) * np.sum(yb - y_pred)
            optimizer.update([w, b], [grad_w, grad_b])
        # Record the full-dataset loss once per epoch
        loss = loss_function(x, y, w, b)
        losses.append(loss)
        if verbose and epoch % 10 == 0:
            print(f"Epoch {epoch}: loss = {loss:.4f}")
    return w, b, losses


# Example usage:
x = np.random.rand(100, 5)
y = np.random.rand(100)
optimizer = Adam(lr=0.01)
w, b, losses = optimize(x, y, optimizer, epochs=100, batch_size=32)

# Plot the per-epoch loss curve via a pandas Series
pd.Series(losses, name="loss").plot()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
```
Three common optimizers are defined here: stochastic gradient descent (SGD), momentum (Momentum), and adaptive moment estimation (Adam), along with a mean-squared-error loss function `loss_function`. The `optimize` function trains a linear model with mini-batch gradient descent, using the analytic MSE gradients (grad_w = -2/n · xᵀ(y - y_pred) and grad_b = -2/n · Σ(y - y_pred)), and returns the fitted parameters together with the loss recorded once per epoch. The optimizer, number of epochs, and batch size can all be passed as arguments; a side-by-side comparison of the three optimizers is sketched below.
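To see how differently the three optimizers converge, the sketch below (an illustrative addition, not part of the original answer) reuses the classes above to run each one on the same random data and overlays the resulting loss curves; the random seed, learning rates, and labels are arbitrary choices:

```python
import numpy as np
import matplotlib.pyplot as plt

# Compare the three optimizers on identical random regression data.
np.random.seed(0)
x = np.random.rand(100, 5)
y = np.random.rand(100)

for opt, label in [(SGD(lr=0.05), "SGD"),
                   (Momentum(lr=0.05), "Momentum"),
                   (Adam(lr=0.01), "Adam")]:
    # Each call starts from fresh parameters, so the curves are comparable.
    _, _, losses = optimize(x, y, opt, epochs=100, verbose=False)
    plt.plot(losses, label=label)

plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
```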
The example code uses randomly generated sample data `x` and `y` and trains the model with the Adam optimizer; finally, the loss is plotted against the epoch number as a line chart.
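Since pandas is already imported, the per-epoch losses can also be wrapped in a `pd.Series` for convenient smoothing before plotting. A minimal sketch, where the 5-epoch rolling window is an arbitrary illustrative choice:

```python
import pandas as pd
import matplotlib.pyplot as plt

# Overlay the raw loss curve with a 5-epoch rolling mean.
loss_series = pd.Series(losses, name="loss")
loss_series.plot(alpha=0.4, label="raw")
loss_series.rolling(window=5).mean().plot(label="rolling mean (5)")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
```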