train_acc = (self.predict(X_batch) == y_batch).mean()
This line computes the model's accuracy on a training batch. `self.predict(X_batch)` returns the model's predicted labels for `X_batch`; comparing them element-wise with `y_batch` yields a boolean array indicating which predictions match the true labels. Calling `mean()` on that array gives the accuracy, i.e. the fraction of correctly predicted samples.
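As a minimal sketch with made-up labels, the same pattern looks like this:
```python
import numpy as np

# Hypothetical example: four predictions against four ground-truth labels.
y_pred = np.array([0, 1, 1, 2])
y_true = np.array([0, 1, 2, 2])
correct = (y_pred == y_true)   # boolean array: [True, True, False, True]
print(correct.mean())          # 0.75, i.e. 3 of 4 predictions are correct
```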
Related questions
For an SVM, what is the corresponding code in linear_svm.py, linear_classifier.py, and svm.ipynb?
linear_svm.py:
```python
import numpy as np

class LinearSVM:
    def __init__(self, lr=0.01, reg=0.01, num_iters=1000, batch_size=32):
        self.lr = lr
        self.reg = reg
        self.num_iters = num_iters
        self.batch_size = batch_size
        self.W = None
        self.b = None

    def train(self, X, y):
        num_train, dim = X.shape
        num_classes = np.max(y) + 1
        if self.W is None:
            # Small random initialization for the weights, zeros for the bias.
            self.W = 0.001 * np.random.randn(dim, num_classes)
            self.b = np.zeros((1, num_classes))
        loss_history = []
        for i in range(self.num_iters):
            # Sample a mini-batch (with replacement) and take one SGD step.
            batch_idx = np.random.choice(num_train, self.batch_size)
            X_batch = X[batch_idx]
            y_batch = y[batch_idx]
            loss, grad_W, grad_b = self.loss(X_batch, y_batch)
            loss_history.append(loss)
            self.W -= self.lr * grad_W
            self.b -= self.lr * grad_b
        return loss_history

    def predict(self, X):
        scores = X.dot(self.W) + self.b
        y_pred = np.argmax(scores, axis=1)
        return y_pred

    def loss(self, X_batch, y_batch):
        # Vectorized multiclass hinge loss with L2 regularization.
        num_train = X_batch.shape[0]
        scores = X_batch.dot(self.W) + self.b
        correct_scores = scores[range(num_train), y_batch]
        margins = np.maximum(0, scores - correct_scores[:, np.newaxis] + 1)
        margins[range(num_train), y_batch] = 0
        loss = np.sum(margins) / num_train + 0.5 * self.reg * np.sum(self.W * self.W)
        # Gradient of the hinge loss with respect to the scores.
        num_pos = np.sum(margins > 0, axis=1)
        dscores = np.zeros_like(scores)
        dscores[margins > 0] = 1
        dscores[range(num_train), y_batch] -= num_pos
        dscores /= num_train
        grad_W = np.dot(X_batch.T, dscores) + self.reg * self.W
        grad_b = np.sum(dscores, axis=0, keepdims=True)
        return loss, grad_W, grad_b
```
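A quick way to sanity-check the analytic gradient returned by `loss` is a centered finite-difference comparison. The sketch below assumes the `LinearSVM` class above and random toy data; the shapes and the index being checked are arbitrary:
```python
import numpy as np

# Sketch of a finite-difference check on one entry of grad_W.
np.random.seed(0)
X = np.random.randn(20, 5)              # 20 samples, 5 features (toy data)
y = np.random.randint(0, 3, size=20)    # 3 classes

svm = LinearSVM()
svm.W = 0.001 * np.random.randn(5, 3)
svm.b = np.zeros((1, 3))

loss, grad_W, grad_b = svm.loss(X, y)
h = 1e-5
i, j = 2, 1                             # arbitrary entry of W to check
svm.W[i, j] += h
loss_plus, _, _ = svm.loss(X, y)
svm.W[i, j] -= 2 * h
loss_minus, _, _ = svm.loss(X, y)
svm.W[i, j] += h                        # restore the original value
numeric = (loss_plus - loss_minus) / (2 * h)
print(abs(numeric - grad_W[i, j]))      # expected to be tiny, roughly 1e-8
```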
linear_classifier.py:
```python
import numpy as np

# Identical to LinearSVM above except for the class name.
class LinearClassifier:
    def __init__(self, lr=0.01, reg=0.01, num_iters=1000, batch_size=32):
        self.lr = lr
        self.reg = reg
        self.num_iters = num_iters
        self.batch_size = batch_size
        self.W = None
        self.b = None

    def train(self, X, y):
        num_train, dim = X.shape
        num_classes = np.max(y) + 1
        if self.W is None:
            self.W = 0.001 * np.random.randn(dim, num_classes)
            self.b = np.zeros((1, num_classes))
        loss_history = []
        for i in range(self.num_iters):
            batch_idx = np.random.choice(num_train, self.batch_size)
            X_batch = X[batch_idx]
            y_batch = y[batch_idx]
            loss, grad_W, grad_b = self.loss(X_batch, y_batch)
            loss_history.append(loss)
            self.W -= self.lr * grad_W
            self.b -= self.lr * grad_b
        return loss_history

    def predict(self, X):
        scores = X.dot(self.W) + self.b
        y_pred = np.argmax(scores, axis=1)
        return y_pred

    def loss(self, X_batch, y_batch):
        num_train = X_batch.shape[0]
        scores = X_batch.dot(self.W) + self.b
        correct_scores = scores[range(num_train), y_batch]
        margins = np.maximum(0, scores - correct_scores[:, np.newaxis] + 1)
        margins[range(num_train), y_batch] = 0
        loss = np.sum(margins) / num_train + 0.5 * self.reg * np.sum(self.W * self.W)
        num_pos = np.sum(margins > 0, axis=1)
        dscores = np.zeros_like(scores)
        dscores[margins > 0] = 1
        dscores[range(num_train), y_batch] -= num_pos
        dscores /= num_train
        grad_W = np.dot(X_batch.T, dscores) + self.reg * self.W
        grad_b = np.sum(dscores, axis=0, keepdims=True)
        return loss, grad_W, grad_b
```
svm.ipynb:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_moons
from sklearn.model_selection import train_test_split
from linear_classifier import LinearClassifier

def plot_data(X, y, title):
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdBu)
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()

def plot_decision_boundary(clf, X, y, title):
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdBu)
    ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Evaluate the classifier on a grid covering the plot area.
    xx = np.linspace(xlim[0], xlim[1], 100)
    yy = np.linspace(ylim[0], ylim[1], 100)
    XX, YY = np.meshgrid(xx, yy)
    xy = np.vstack([XX.ravel(), YY.ravel()]).T
    Z = clf.predict(xy).reshape(XX.shape)
    # predict() returns class labels 0/1, so the boundary sits at 0.5.
    plt.contour(XX, YY, Z, levels=[0.5], colors='k', linestyles='-')
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()

def main():
    X, y = make_blobs(n_samples=200, centers=2, random_state=42)
    plot_data(X, y, 'Linearly Separable Data')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    clf = LinearClassifier()
    loss_history = clf.train(X_train, y_train)
    train_acc = np.mean(clf.predict(X_train) == y_train)
    test_acc = np.mean(clf.predict(X_test) == y_test)
    print('Train accuracy: {:.3f}, Test accuracy: {:.3f}'.format(train_acc, test_acc))
    plot_decision_boundary(clf, X, y, 'Linear SVM')

if __name__ == '__main__':
    main()
```
The code above implements a simple linear SVM for binary classification. In `svm.ipynb`, we use `make_blobs` to generate a linearly separable dataset and split it into training and test sets. We then train a `LinearClassifier` on the training set, evaluate its accuracy on the test set, and finally plot the model's decision boundary.
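Since the notebook already imports `make_moons`, a natural follow-up experiment is to rerun the pipeline on data that is not linearly separable; a linear model is expected to do noticeably worse there. A sketch, reusing the names from `main()`:
```python
# Sketch: repeat the experiment on make_moons (imported above).
X, y = make_moons(n_samples=200, noise=0.1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf = LinearClassifier()
clf.train(X_train, y_train)
# A straight decision boundary cannot separate the two moons exactly,
# so test accuracy should fall well below the blobs case.
print('Moons test accuracy:', np.mean(clf.predict(X_test) == y_test))
```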
Implementation of error backpropagation: output the test-set result of a model trained on the last 4000 samples, with parameters batch: 300; iters_num: 2000; lambda: 0.1
Error backpropagation is a standard method for training neural networks: it computes the error between the network's output and the true labels, then updates the network's parameters based on that error.
The implementation proceeds as follows (a minimal numeric sketch of steps 3-5 appears right after the list):
1. Initialize the network parameters, i.e. the weights and biases.
2. Randomly select a batch of samples from the training data, feed it through the network, and compute the output.
3. Compute the error between the output and the true labels, measured with a loss function such as mean squared error.
4. Compute the gradient, i.e. the partial derivative of the error with respect to each parameter.
5. Update the parameters by gradient descent: subtract the learning rate times the corresponding gradient from each parameter.
6. Repeat steps 2-5 until the specified number of iterations (iters_num) is reached or the error falls below a given threshold.
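As the promised sketch of steps 3-5, consider a hypothetical one-parameter model y = w * x trained on a single sample (all numbers assumed):
```python
# Toy illustration: one gradient-descent step on the scalar model y = w * x
# with squared-error loss.
w, lr = 0.5, 0.1           # parameter and learning rate
x, t = 2.0, 3.0            # one training sample: input and target
y = w * x                  # step 2: forward pass   -> y = 1.0
loss = 0.5 * (y - t) ** 2  # step 3: squared error  -> 2.0
grad_w = (y - t) * x       # step 4: d(loss)/dw     -> -4.0
w -= lr * grad_w           # step 5: gradient step  -> w = 0.9
```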
The code implementation is as follows:
```python
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_grad(x):
    # Derivative of the sigmoid, expressed through sigmoid itself.
    return (1 - sigmoid(x)) * sigmoid(x)

class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size):
        self.params = {}
        self.params['W1'] = 0.01 * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = 0.01 * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
        self.lambda_reg = 0.0  # set properly in fit()

    def predict(self, x):
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        z1 = np.dot(x, W1) + b1
        a1 = sigmoid(z1)
        z2 = np.dot(a1, W2) + b2
        return z2  # identity output layer

    def loss(self, x, t):
        y = self.predict(x)
        # Mean squared error plus L2 regularization on the weights.
        mse = np.mean((y - t) ** 2)
        reg = 0.5 * self.lambda_reg * (np.sum(self.params['W1'] ** 2)
                                       + np.sum(self.params['W2'] ** 2))
        return mse + reg

    def accuracy(self, x, t):
        y = self.predict(x)
        # Threshold the real-valued output at 0.5 for binary classification.
        return np.mean((y.ravel() > 0.5) == (t.ravel() == 1)) * 100

    def numerical_gradient(self, x, t):
        # Centered finite differences; np.nditer also covers the 1-D biases.
        h = 1e-4
        grads = {}
        for param_name, param in self.params.items():
            grad = np.zeros_like(param)
            it = np.nditer(param, flags=['multi_index'], op_flags=['readwrite'])
            while not it.finished:
                idx = it.multi_index
                tmp_val = param[idx]
                param[idx] = tmp_val + h
                f1 = self.loss(x, t)
                param[idx] = tmp_val - h
                f2 = self.loss(x, t)
                grad[idx] = (f1 - f2) / (2 * h)
                param[idx] = tmp_val
                it.iternext()
            grads[param_name] = grad
        return grads

    def gradient(self, x, t):
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        grads = {}
        batch_num = x.shape[0]
        # forward
        z1 = np.dot(x, W1) + b1
        a1 = sigmoid(z1)
        z2 = np.dot(a1, W2) + b2
        y = z2
        # backward: d/dy of np.mean((y - t) ** 2) is 2 * (y - t) / batch_num
        delta2 = 2 * (y - t) / batch_num
        grads['W2'] = np.dot(a1.T, delta2)
        grads['b2'] = np.sum(delta2, axis=0)
        delta1 = np.dot(delta2, W2.T) * sigmoid_grad(z1)
        grads['W1'] = np.dot(x.T, delta1)
        grads['b1'] = np.sum(delta1, axis=0)
        # add the L2 regularization term to the weight gradients
        grads['W2'] += self.lambda_reg * W2
        grads['W1'] += self.lambda_reg * W1
        return grads

    def fit(self, x_train, y_train, x_test, y_test,
            batch_size=100, epochs=10, learning_rate=0.1, lambda_reg=0.1):
        self.lambda_reg = lambda_reg
        train_loss_list = []
        train_acc_list = []
        test_acc_list = []
        train_size = x_train.shape[0]
        for epoch in range(epochs):
            # Shuffle once per epoch, then sweep through mini-batches.
            perm = np.random.permutation(train_size)
            for i in range(0, train_size, batch_size):
                x_batch = x_train[perm[i:i+batch_size]]
                y_batch = y_train[perm[i:i+batch_size]]
                grads = self.gradient(x_batch, y_batch)
                for param_name in self.params:
                    self.params[param_name] -= learning_rate * grads[param_name]
            train_loss = self.loss(x_train, y_train)
            train_loss_list.append(train_loss)
            train_acc = self.accuracy(x_train, y_train)
            train_acc_list.append(train_acc)
            test_acc = self.accuracy(x_test, y_test)
            test_acc_list.append(test_acc)
            print("epoch: %d, train_loss: %f, train_acc: %f, test_acc: %f"
                  % (epoch + 1, train_loss, train_acc, test_acc))
        return train_loss_list, train_acc_list, test_acc_list

# Load the data.
x_train = np.load('x_train.npy')
y_train = np.load('y_train.npy')
x_test = np.load('x_test.npy')
y_test = np.load('y_test.npy')

# Build the network.
input_size = x_train.shape[1]
hidden_size = 100
output_size = 1
net = TwoLayerNet(input_size, hidden_size, output_size)

# Train the network (keyword arguments so iters_num maps to epochs).
batch_size = 300
iters_num = 2000
lambda_reg = 0.1
train_loss_list, train_acc_list, test_acc_list = net.fit(
    x_train, y_train, x_test, y_test,
    batch_size=batch_size, epochs=iters_num, lambda_reg=lambda_reg)

# Report the test-set result.
test_acc = net.accuracy(x_test, y_test)
print("Test accuracy: %f" % test_acc)
```
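Since the class carries both `numerical_gradient` and the backprop `gradient`, a hedged sanity check (toy shapes assumed) is to compare the two on a small batch:
```python
# Sketch: compare backprop gradients with numerical gradients on toy data.
np.random.seed(0)
x_check = np.random.randn(5, input_size)
t_check = np.random.randint(0, 2, size=(5, 1)).astype(float)
grads_bp = net.gradient(x_check, t_check)
grads_num = net.numerical_gradient(x_check, t_check)
for name in grads_bp:
    diff = np.max(np.abs(grads_bp[name] - grads_num[name]))
    print(name, diff)  # differences should be tiny, roughly 1e-7 or smaller
```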
To output the test-set result of a model trained on the last 4000 samples, we train a fresh network on only the last 4000 training samples and then measure its accuracy on the test set:
```python
# Keep only the last 4000 training samples.
x_train = x_train[-4000:]
y_train = y_train[-4000:]

# Train a fresh network on the reduced training set.
net = TwoLayerNet(input_size, hidden_size, output_size)
batch_size = 300
iters_num = 2000
lambda_reg = 0.1
train_loss_list, train_acc_list, test_acc_list = net.fit(
    x_train, y_train, x_test, y_test,
    batch_size=batch_size, epochs=iters_num, lambda_reg=lambda_reg)

# Report the test-set result.
test_acc = net.accuracy(x_test, y_test)
print("Test accuracy: %f" % test_acc)
```
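The returned histories are never visualized above; a small matplotlib sketch (assuming the lists from the call above are in scope) can plot the loss and accuracy curves:
```python
import matplotlib.pyplot as plt

# Plot the training loss and the train/test accuracy curves per epoch.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(train_loss_list)
ax1.set_xlabel('epoch')
ax1.set_ylabel('train loss')
ax2.plot(train_acc_list, label='train acc')
ax2.plot(test_acc_list, label='test acc')
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy (%)')
ax2.legend()
plt.tight_layout()
plt.show()
```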