python实现人工神经网络二分类代码
时间: 2023-08-06 10:05:05 浏览: 257
以下是基于Python实现人工神经网络二分类的代码示例:
```python
import numpy as np
# 定义sigmoid函数
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# A minimal fully-connected network: input -> sigmoid hidden -> sigmoid output,
# trained with full-batch gradient descent on binary cross-entropy.
class NeuralNetwork:
    """One-hidden-layer binary classifier.

    Args:
        input_size: number of input features.
        hidden_size: number of hidden units.
        output_size: number of output units (1 for binary classification).
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Random weights break symmetry between units; biases start at zero.
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.zeros((1, self.output_size))

    @staticmethod
    def _sigmoid(z):
        """Elementwise logistic function, clipped so np.exp never overflows."""
        return 1.0 / (1.0 + np.exp(-np.clip(z, -709.0, 709.0)))

    def forward(self, X):
        """Forward pass over X of shape (n, input_size).

        Caches pre-activations (z1, z2) and activations (a1, y_hat) on self
        so backward() can reuse them.  Returns an (n, output_size) array of
        probabilities in (0, 1).
        """
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self._sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.y_hat = self._sigmoid(self.z2)
        return self.y_hat

    def backward(self, X, y, y_hat, learning_rate):
        """One gradient-descent step on the cross-entropy loss.

        BUGFIX: the original multiplied (y_hat - y) by sigmoid'(z2), which is
        the squared-error delta, while train() reports cross-entropy loss.
        For a sigmoid output with cross-entropy the output-layer delta is
        exactly (y_hat - y); using it keeps the gradient consistent with the
        reported loss and avoids vanishing gradients at saturated outputs.
        """
        dLoss_z2 = y_hat - y
        dLoss_a1 = np.dot(dLoss_z2, self.W2.T)
        # Reuse the cached activation a1 = sigmoid(z1) instead of recomputing
        # sigmoid(z1) twice as the original did.
        dLoss_z1 = dLoss_a1 * self.a1 * (1 - self.a1)
        # Gradient-descent updates for both layers.
        self.W2 -= learning_rate * np.dot(self.a1.T, dLoss_z2)
        self.b2 -= learning_rate * np.sum(dLoss_z2, axis=0, keepdims=True)
        self.W1 -= learning_rate * np.dot(X.T, dLoss_z1)
        self.b1 -= learning_rate * np.sum(dLoss_z1, axis=0, keepdims=True)

    def train(self, X, y, num_iterations, learning_rate):
        """Run num_iterations of full-batch gradient descent.

        Prints the mean binary cross-entropy every 1000 iterations.
        """
        for i in range(num_iterations):
            y_hat = self.forward(X)
            self.backward(X, y, y_hat, learning_rate)
            if i % 1000 == 0:
                # Clip probabilities away from {0, 1} so np.log never
                # produces -inf/nan (the original could).
                p = np.clip(y_hat, 1e-12, 1.0 - 1e-12)
                loss = np.mean(-y * np.log(p) - (1 - y) * np.log(1 - p))
                print('Loss after iteration %d: %f' % (i, loss))
# --- Demo: learn the XOR problem --------------------------------------
# Seed the global RNG so the randomly initialized weights — and therefore
# the printed losses and predictions — are reproducible run to run.
np.random.seed(42)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])  # XOR labels: 1 iff exactly one input is 1
nn = NeuralNetwork(2, 3, 1)  # 2 inputs, 3 hidden units, 1 output
nn.train(X, y, 10000, 0.1)
print('Predictions:')
print(nn.forward(X))
```
此代码实现了一个包含一个输入层、一个隐藏层和一个输出层的神经网络:输入层有2个神经元,隐藏层有3个神经元,输出层有1个神经元(对应 NeuralNetwork(2, 3, 1))。在测试代码中,我们使用了4个训练样本(XOR 问题),每个样本包含2个特征和1个标签。神经网络通过反向传播算法来学习权重和偏置,并输出对于输入样本的预测结果。
阅读全文