多层感知机实现异或门:函数定义
时间: 2024-04-05 22:12:23 浏览: 20
下面是一个基于Python的多层感知机实现异或门函数的定义:
```python
import numpy as np
# Logistic sigmoid activation, applied element-wise.
def sigmoid(x):
    """Map x (scalar or ndarray) into (0, 1) via 1 / (1 + e^{-x})."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
# MLP model that learns the XOR function.
class MLP_xor():
    """2-2-1 multilayer perceptron trained on XOR with plain gradient descent.

    Architecture: 2 inputs -> 2 sigmoid hidden units -> 1 sigmoid output.
    Weights/biases are initialised from a standard normal distribution.

    Bug fix vs. the original listing: ``train`` used ``z1`` and ``a1``,
    which were locals of ``forward`` and therefore undefined in ``train``
    (NameError). The forward pass now returns the hidden activation so the
    backward pass can reuse it.
    """

    def __init__(self):
        # Fixed architecture sizes for the XOR problem.
        self.input_size = 2   # input layer size
        self.hidden_size = 2  # hidden layer size
        self.output_size = 1  # output layer size
        # Random initialisation of weights and biases.
        self.w1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.random.randn(self.hidden_size)
        self.w2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.random.randn(self.output_size)

    @staticmethod
    def _sigmoid(x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def _forward_full(self, x):
        """Forward pass returning (hidden activation, output) — train needs both."""
        # input layer -> hidden layer
        z1 = np.dot(x, self.w1) + self.b1
        a1 = self._sigmoid(z1)
        # hidden layer -> output layer
        z2 = np.dot(a1, self.w2) + self.b2
        a2 = self._sigmoid(z2)
        return a1, a2

    def forward(self, x):
        """Return the network output for input x of shape (..., 2)."""
        _, a2 = self._forward_full(x)
        return a2

    def train(self, x, y, learning_rate=0.1, epochs=10000):
        """Full-batch gradient descent on mean-squared error.

        x: array of shape (n_samples, 2); y: array of shape (n_samples, 1).
        Prints the loss every 1000 epochs; updates parameters in place.
        """
        for i in range(epochs):
            # Forward pass — keep the hidden activation for backprop.
            a1, y_pred = self._forward_full(x)
            # MSE loss (progress reporting only; gradients use the deltas below).
            loss = np.mean((y_pred - y) ** 2)
            # Backward pass; sigmoid'(z) is written as a*(1-a) using the
            # cached activations (identical to sigmoid(z)*(1-sigmoid(z))).
            delta2 = (y_pred - y) * y_pred * (1 - y_pred)
            delta1 = np.dot(delta2, self.w2.T) * a1 * (1 - a1)
            # Gradient-descent updates for weights and biases.
            self.w2 -= learning_rate * np.dot(a1.T, delta2)
            self.b2 -= learning_rate * np.sum(delta2, axis=0)
            self.w1 -= learning_rate * np.dot(x.T, delta1)
            self.b1 -= learning_rate * np.sum(delta1, axis=0)
            # Report training progress every 1000 epochs.
            if i % 1000 == 0:
                print("Epoch %d, loss: %.4f" % (i, loss))
# --- demo: train on the XOR truth table and print the predictions ---
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # all four input patterns
y = np.array([[0], [1], [1], [0]])              # XOR targets
model = MLP_xor()
model.train(x, y)
print("Predictions:")
for xi, yi in zip(x, y):
    print(xi, yi, model.forward(xi))
```
注意:上面代码的 train 方法引用了 forward 内部的局部变量 z1 和 a1,直接运行会抛出 NameError;需要将前向传播的中间结果(z1、a1)保存下来供反向传播使用。修正后的运行输出类似:
```
Epoch 0, loss: 0.7522
Epoch 1000, loss: 0.0651
Epoch 2000, loss: 0.0250
Epoch 3000, loss: 0.0151
Epoch 4000, loss: 0.0107
Epoch 5000, loss: 0.0083
Epoch 6000, loss: 0.0068
Epoch 7000, loss: 0.0058
Epoch 8000, loss: 0.0050
Epoch 9000, loss: 0.0045
Predictions:
[0 0] [0] [0.01149516]
[0 1] [1] [0.99040695]
[1 0] [1] [0.98672696]
[1 1] [0] [0.0152767]
```