python神经网络代码
时间: 2023-08-30 07:09:25 浏览: 31
以下是一个简单的基于Python的神经网络代码示例:
```python
import numpy as np
# 定义神经网络类
class NeuralNetwork:
    """A minimal 2-layer (one hidden layer) feedforward network.

    Architecture: input -> hidden (sigmoid) -> output (sigmoid), trained
    with plain batch gradient descent on a squared-error-style loss
    (the backward pass uses ``a2 - targets`` as the output error).
    """

    def __init__(self, input_size, hidden_size, output_size):
        """Initialize weights and biases from a standard normal distribution.

        Args:
            input_size: number of input features.
            hidden_size: number of hidden units.
            output_size: number of output units.
        """
        self.weights1 = np.random.randn(input_size, hidden_size)
        self.bias1 = np.random.randn(hidden_size)
        self.weights2 = np.random.randn(hidden_size, output_size)
        self.bias2 = np.random.randn(output_size)

    def forward(self, inputs):
        """Run a forward pass and cache intermediate activations.

        The cached ``z1/a1/z2/a2`` attributes are consumed by ``backward``,
        so ``forward`` must be called before each ``backward`` call.

        Args:
            inputs: array of shape (batch, input_size) — a 1-D vector also
                works via NumPy broadcasting.

        Returns:
            Network output ``a2`` of shape (batch, output_size).
        """
        self.z1 = np.dot(inputs, self.weights1) + self.bias1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.weights2) + self.bias2
        self.a2 = self.sigmoid(self.z2)
        return self.a2

    def backward(self, inputs, targets, learning_rate):
        """Backpropagate the error and update weights/biases in place.

        Args:
            inputs: the same batch passed to the preceding ``forward`` call.
            targets: desired outputs, shape (batch, output_size).
            learning_rate: gradient-descent step size.
        """
        error = self.a2 - targets
        # sigmoid'(z2) == a2 * (1 - a2) exactly; reuse the cached activation
        # instead of re-evaluating exp() via sigmoid_derivative(self.z2).
        delta2 = error * self.a2 * (1 - self.a2)
        d_weights2 = np.dot(self.a1.T, delta2)
        d_bias2 = np.sum(delta2, axis=0)
        # Propagate the error through weights2, likewise reusing a1.
        delta1 = np.dot(delta2, self.weights2.T) * self.a1 * (1 - self.a1)
        d_weights1 = np.dot(inputs.T, delta1)
        d_bias1 = np.sum(delta1, axis=0)
        # Gradient-descent update.
        self.weights1 -= learning_rate * d_weights1
        self.bias1 -= learning_rate * d_bias1
        self.weights2 -= learning_rate * d_weights2
        self.bias2 -= learning_rate * d_bias2

    def sigmoid(self, x):
        """Elementwise logistic function 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid at pre-activation ``x``.

        Kept for interface compatibility; ``backward`` now computes the
        same quantity from cached activations.
        """
        s = self.sigmoid(x)
        return s * (1 - s)
# XOR truth table: inputs and their expected outputs.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([[0], [1], [1], [0]])

# Build a 2-3-1 network and train it with plain gradient descent.
nn = NeuralNetwork(2, 3, 1)
for _ in range(10000):
    nn.forward(inputs)          # forward pass caches activations
    nn.backward(inputs, targets, 0.1)

# Print the trained network's prediction for each XOR input.
for sample in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(nn.forward(np.array(sample)))
```
这是一个简单的两层神经网络,使用sigmoid作为激活函数,并使用梯度下降算法进行权重和偏置的更新。你可以根据需要修改网络的层数、激活函数等参数来构建不同的神经网络。