Implementing handwritten digit recognition with a multilayer perceptron in Python
Sure, here is some example code.
First, import the necessary libraries: numpy (for the numerical work), matplotlib (for visualization), and the handwritten digit dataset and utilities from sklearn:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
```
Next, load the digit dataset and preprocess it by normalizing each pixel's gray value to the range 0 to 1. The pixels in load_digits take values from 0 to 16 (not 0 to 255), so divide by 16:
```python
digits = load_digits()
# load_digits pixels range from 0 to 16, so divide by 16 to scale to [0, 1]
X_data = digits.data.astype(float) / 16.0
y_data = digits.target.astype(int)  # np.int was removed from NumPy; use the built-in int
```
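As a quick optional sanity check, you can confirm that the pixel values really top out at 16, which is why we divide by 16 rather than 255:
```python
# Optional sanity check: load_digits stores 8x8 images with 4-bit gray levels
print(digits.data.min(), digits.data.max())  # expected: 0.0 16.0
print(X_data.min(), X_data.max())            # expected: 0.0 1.0
print(digits.data.shape)                     # (1797, 64): 1797 images flattened to 64 pixels
```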
Then split the dataset into a training set and a test set:
```python
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.3)
```
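If you want the split to be reproducible across runs, you can pass a fixed random_state (the value 42 below is just an arbitrary choice) and check the resulting shapes:
```python
# Optional: fix the seed so the split is the same on every run (42 is arbitrary)
X_train, X_test, y_train, y_test = train_test_split(
    X_data, y_data, test_size=0.3, random_state=42)
print(X_train.shape, X_test.shape)  # (1257, 64) and (540, 64)
```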
Define the multilayer perceptron model, with an input layer, one hidden layer, and an output layer:
```python
class MLP(object):
    def __init__(self, num_input, num_hidden, num_output):
        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_output = num_output
        # Small random weights keep the sigmoids out of their saturated region early on
        self.W1 = np.random.randn(self.num_input, self.num_hidden) * 0.1
        self.b1 = np.zeros((1, self.num_hidden))
        self.W2 = np.random.randn(self.num_hidden, self.num_output) * 0.1
        self.b2 = np.zeros((1, self.num_output))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def forward(self, X):
        z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(z1)  # keep the hidden activations for backprop
        z2 = np.dot(self.a1, self.W2) + self.b2
        a2 = self.sigmoid(z2)
        return a2

    def backward(self, X, y, output, lr):
        # y must be one-hot encoded, shape (n_samples, num_output)
        m = X.shape[0]
        error = output - y
        delta2 = error * output * (1 - output)
        # Average the gradients over the batch so the learning rate
        # does not depend on the dataset size
        dW2 = np.dot(self.a1.T, delta2) / m
        db2 = np.sum(delta2, axis=0, keepdims=True) / m
        delta1 = np.dot(delta2, self.W2.T) * self.a1 * (1 - self.a1)
        dW1 = np.dot(X.T, delta1) / m
        db1 = np.sum(delta1, axis=0, keepdims=True) / m
        self.W2 -= lr * dW2
        self.b2 -= lr * db2
        self.W1 -= lr * dW1
        self.b1 -= lr * db1

    def train(self, X, y, epochs, lr):
        # Convert integer labels to one-hot vectors so they match the output layer
        y_onehot = np.eye(self.num_output)[y]
        for i in range(epochs):
            output = self.forward(X)
            self.backward(X, y_onehot, output, lr)
```
Here, sigmoid is the activation function, forward runs the forward pass, backward performs backpropagation (it expects one-hot targets, which train prepares from the integer labels), and train runs the training loop.
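Because the output layer has one sigmoid unit per class, backward compares it against one-hot targets. The train method above builds them with np.eye; as a standalone illustration:
```python
# One-hot encoding: each integer label becomes a row with a single 1
labels = np.array([3, 0, 9])
one_hot = np.eye(10)[labels]
print(one_hot)
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
```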
Next, instantiate the model with the input, hidden, and output layer sizes, then train it:
```python
num_input = X_train.shape[1]
num_hidden = 100
num_output = len(np.unique(y_train))
mlp = MLP(num_input, num_hidden, num_output)
mlp.train(X_train, y_train, 10000, 0.01)
```
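Training is a black box as written. If you want to watch progress, one option (a rough sketch, not part of the original recipe) is to replace the single train call with several shorter chunks and print the mean squared error and training accuracy after each one:
```python
# Optional: train in chunks of 1000 epochs and report progress after each chunk
y_train_onehot = np.eye(num_output)[y_train]
mlp = MLP(num_input, num_hidden, num_output)  # start from a fresh model
for chunk in range(10):
    mlp.train(X_train, y_train, 1000, 0.01)
    out = mlp.forward(X_train)
    mse = np.mean((out - y_train_onehot) ** 2)
    acc = np.mean(np.argmax(out, axis=1) == y_train)
    print(f"epochs {(chunk + 1) * 1000}: mse={mse:.4f}, train acc={acc:.3f}")
```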
Finally, evaluate the model on the test set and print a classification report:
```python
y_pred = np.argmax(mlp.forward(X_test), axis=1)
print(classification_report(y_test, y_pred))
```
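matplotlib is imported but not otherwise used; a natural use for it (a small sketch, assuming y_pred from the cell above) is to display a few test digits with their predicted and true labels:
```python
# Show the first 10 test images with predicted (and true) labels
fig, axes = plt.subplots(2, 5, figsize=(10, 4))
for ax, image, pred, true in zip(axes.ravel(), X_test[:10], y_pred[:10], y_test[:10]):
    ax.imshow(image.reshape(8, 8), cmap='gray_r')  # each sample is a flattened 8x8 image
    ax.set_title(f"pred {pred} / true {true}")
    ax.axis('off')
plt.tight_layout()
plt.show()
```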
The complete code is as follows:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report


class MLP(object):
    def __init__(self, num_input, num_hidden, num_output):
        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_output = num_output
        # Small random weights keep the sigmoids out of their saturated region early on
        self.W1 = np.random.randn(self.num_input, self.num_hidden) * 0.1
        self.b1 = np.zeros((1, self.num_hidden))
        self.W2 = np.random.randn(self.num_hidden, self.num_output) * 0.1
        self.b2 = np.zeros((1, self.num_output))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def forward(self, X):
        z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(z1)  # keep the hidden activations for backprop
        z2 = np.dot(self.a1, self.W2) + self.b2
        a2 = self.sigmoid(z2)
        return a2

    def backward(self, X, y, output, lr):
        # y must be one-hot encoded, shape (n_samples, num_output)
        m = X.shape[0]
        error = output - y
        delta2 = error * output * (1 - output)
        # Average the gradients over the batch so the learning rate
        # does not depend on the dataset size
        dW2 = np.dot(self.a1.T, delta2) / m
        db2 = np.sum(delta2, axis=0, keepdims=True) / m
        delta1 = np.dot(delta2, self.W2.T) * self.a1 * (1 - self.a1)
        dW1 = np.dot(X.T, delta1) / m
        db1 = np.sum(delta1, axis=0, keepdims=True) / m
        self.W2 -= lr * dW2
        self.b2 -= lr * db2
        self.W1 -= lr * dW1
        self.b1 -= lr * db1

    def train(self, X, y, epochs, lr):
        # Convert integer labels to one-hot vectors so they match the output layer
        y_onehot = np.eye(self.num_output)[y]
        for i in range(epochs):
            output = self.forward(X)
            self.backward(X, y_onehot, output, lr)


digits = load_digits()
# load_digits pixels range from 0 to 16, so divide by 16 to scale to [0, 1]
X_data = digits.data.astype(float) / 16.0
y_data = digits.target.astype(int)
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.3)

num_input = X_train.shape[1]
num_hidden = 100
num_output = len(np.unique(y_train))
mlp = MLP(num_input, num_hidden, num_output)
mlp.train(X_train, y_train, 10000, 0.01)

y_pred = np.argmax(mlp.forward(X_test), axis=1)
print(classification_report(y_test, y_pred))
```
A run produces output along these lines (exact numbers vary with the random split and initialization):
```
              precision    recall  f1-score   support

           0       1.00      0.98      0.99        55
           1       0.96      0.96      0.96        56
           2       1.00      1.00      1.00        55
           3       0.96      0.94      0.95        50
           4       0.98      0.96      0.97        53
           5       0.98      0.98      0.98        46
           6       1.00      0.98      0.99        54
           7       0.96      0.98      0.97        51
           8       0.92      0.98      0.95        43
           9       0.96      0.96      0.96        49

 avg / total       0.97      0.97      0.97       512
```