bp神经网络python并输出准确率
时间: 2024-02-02 18:03:38 浏览: 104
python实现bp神经网络
4星 · 用户满意度95%
下面是一个简单的 BP 神经网络的 Python 实现,并输出准确率的示例:
```python
import numpy as np
class NeuralNetwork:
    """A fully-connected feed-forward network trained with mini-batch SGD
    and backpropagation, using a sigmoid activation and quadratic cost.

    Parameters
    ----------
    layers : list[int]
        Neurons per layer, e.g. ``[64, 30, 10]`` for 64 inputs, one hidden
        layer of 30, and 10 outputs.
    """

    def __init__(self, layers):
        self.layers = layers
        # Weight matrix for layer l has shape (n_l, n_{l-1}); biases are
        # (n_l, 1) column vectors, initialised from a standard normal.
        self.weights = [np.random.randn(y, x) for x, y in zip(layers[:-1], layers[1:])]
        self.biases = [np.random.randn(y, 1) for y in layers[1:]]

    @staticmethod
    def _as_column(v):
        """Return *v* as an (n, 1) float column vector.

        Accepting 1-D sample vectors avoids a silent broadcasting bug:
        with a 1-D ``a``, ``np.dot(w, a) + b`` broadcasts (n,) + (n, 1)
        into an (n, n) matrix instead of raising.
        """
        v = np.asarray(v, dtype=float)
        return v.reshape(-1, 1) if v.ndim == 1 else v

    def sigmoid(self, z):
        """Element-wise logistic activation 1 / (1 + e^-z)."""
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(self, z):
        """Derivative of the sigmoid; evaluates sigmoid(z) only once."""
        s = self.sigmoid(z)
        return s * (1 - s)

    def feedforward(self, a):
        """Propagate input *a* through every layer; return the output activations."""
        a = self._as_column(a)
        for w, b in zip(self.weights, self.biases):
            a = self.sigmoid(np.dot(w, a) + b)
        return a

    def backpropagation(self, x, y):
        """Return ``(nabla_w, nabla_b)``: per-layer gradients of the
        quadratic cost for a single training sample ``(x, y)``."""
        x = self._as_column(x)
        y = self._as_column(y)
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        # Forward pass, remembering pre-activations (zs) and activations.
        activation = x
        activations = [x]
        zs = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # Output-layer error for the quadratic cost: (a - y) * sigma'(z).
        delta = (activations[-1] - y) * self.sigmoid_prime(zs[-1])
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        nabla_b[-1] = delta
        # Propagate the error backwards through the hidden layers.
        for l in range(2, len(self.layers)):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
            nabla_b[-l] = delta
        return (nabla_w, nabla_b)

    def train(self, training_data, epochs, mini_batch_size, learning_rate, test_data=None):
        """Train with mini-batch stochastic gradient descent.

        ``training_data`` is an iterable of ``(x, y)`` pairs; it is
        materialised so generators/zip objects are accepted too. If
        ``test_data`` is given, accuracy is printed after each epoch.
        """
        training_data = list(training_data)
        n = len(training_data)
        for i in range(epochs):
            np.random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                nabla_w = [np.zeros(w.shape) for w in self.weights]
                nabla_b = [np.zeros(b.shape) for b in self.biases]
                # Sum the per-sample gradients over the batch.
                for x, y in mini_batch:
                    delta_nabla_w, delta_nabla_b = self.backpropagation(x, y)
                    nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
                    nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
                # Gradient-descent step with the batch-averaged gradient.
                self.weights = [w - (learning_rate / len(mini_batch)) * nw for w, nw in zip(self.weights, nabla_w)]
                self.biases = [b - (learning_rate / len(mini_batch)) * nb for b, nb in zip(self.biases, nabla_b)]
            if test_data:
                print("Epoch {}: Accuracy {:.2f}%".format(i + 1, self.evaluate(test_data) * 100))
            else:
                print("Epoch {} complete".format(i + 1))

    def evaluate(self, test_data):
        """Return the fraction of samples whose predicted argmax equals
        the argmax of the one-hot label."""
        test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) for (x, y) in test_data]
        return sum(int(pred == truth) for (pred, truth) in test_results) / len(test_data)
```
这个实现中,我们使用了 numpy 来处理矩阵运算。我们可以使用 `NeuralNetwork` 类来构建一个神经网络,然后使用 `train` 方法来训练网络。其中,`training_data` 是训练数据,`epochs` 是迭代次数,`mini_batch_size` 是小批量大小,`learning_rate` 是学习率,`test_data` 是测试数据。
我们可以将训练集和测试集转换为 numpy 数组,然后创建一个 `NeuralNetwork` 对象,指定层数和神经元数量,然后调用 `train` 方法来训练网络。例如:
```python
# Load the handwritten-digits dataset (1797 samples of 8x8 images, 10 classes).
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits()
# Flatten each 8x8 image to a 64-vector and scale pixel values into [0, 1]
# (raw values are 0..16). Keeping samples as rows avoids the original
# X.T ... train_test_split(X.T) round trip.
X = digits.images.reshape((len(digits.images), -1)) / 16.0
# One-hot encode the integer labels 0..9.
y = np.eye(10)[digits.target]
# Hold out 30% of the samples for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# The network's feedforward/backpropagation expect (n, 1) COLUMN vectors;
# passing raw 1-D rows silently broadcasts (n,) + (n, 1) into an (n, n)
# matrix, so reshape every sample explicitly.
training_data = [(x.reshape(-1, 1), t.reshape(-1, 1)) for x, t in zip(X_train, y_train)]
test_data = [(x.reshape(-1, 1), t.reshape(-1, 1)) for x, t in zip(X_test, y_test)]
# One hidden layer of 30 neurons: 64 -> 30 -> 10.
nn = NeuralNetwork([64, 30, 10])
nn.train(training_data, epochs=100, mini_batch_size=10, learning_rate=3.0, test_data=test_data)
```
在训练完成后,我们可以使用 `evaluate` 方法来计算网络的准确率。例如:
```python
# Compute accuracy on the held-out test set. evaluate() expects (x, y)
# pairs of (n, 1) column vectors, so reshape the 1-D sample rows first
# (1-D rows would otherwise broadcast into wrong (n, n) shapes).
test_pairs = [(x.reshape(-1, 1), t.reshape(-1, 1)) for x, t in zip(X_test, y_test)]
accuracy = nn.evaluate(test_pairs)
print("Test Accuracy: {:.2f}%".format(accuracy * 100))
```
完整的代码示例可以在以下链接中找到:https://github.com/zhiyongc/Neural-Networks/blob/master/bp_nn.py
阅读全文