多层全连接神经网络代码示例
时间: 2023-07-22 18:18:15 浏览: 47
以下是一个简单的多层全连接神经网络的 Python 代码示例:
```python
import numpy as np
class NeuralNetwork:
    """A simple multi-layer fully-connected neural network.

    Architecture: input -> ``num_hidden_layers`` hidden layers of
    ``num_hidden_units`` sigmoid neurons each -> sigmoid output layer.
    Trained by per-sample stochastic gradient descent on the
    mean-squared-error loss.
    """

    def __init__(self, num_inputs, num_hidden_layers, num_hidden_units, num_outputs):
        """Create the network and initialize all parameters.

        Args:
            num_inputs: dimensionality of one input sample.
            num_hidden_layers: number of hidden layers (>= 1).
            num_hidden_units: neurons in each hidden layer.
            num_outputs: dimensionality of the output.
        """
        self.num_inputs = num_inputs
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_units = num_hidden_units
        self.num_outputs = num_outputs
        # 初始化权重和偏置 (initialize weights and biases):
        # weights[i]/biases[i] belong to layer i; layer 0 reads the raw
        # input, layer num_hidden_layers produces the network output.
        self.weights = []
        self.biases = []
        for i in range(self.num_hidden_layers + 1):
            fan_in = self.num_inputs if i == 0 else self.num_hidden_units
            fan_out = (self.num_outputs if i == self.num_hidden_layers
                       else self.num_hidden_units)
            self.weights.append(np.random.randn(fan_in, fan_out))
            self.biases.append(np.zeros((1, fan_out)))

    def sigmoid(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative expressed in terms of the sigmoid OUTPUT:
        if s = sigmoid(z), then sigmoid'(z) = s * (1 - s)."""
        return x * (1 - x)

    def forward(self, x):
        """Run a forward pass, caching per-layer outputs for backprop.

        Args:
            x: input of shape (batch, num_inputs); a 1-D vector is
               promoted to a single-sample batch.

        Returns:
            Network output of shape (batch, num_outputs); also stored
            as ``self.output``.
        """
        x = np.atleast_2d(x)  # 1-D rows from train() would break .T in backward()
        self.activations = []      # pre-activation values, one per layer
        self.hidden_outputs = []   # activated outputs of the hidden layers only
        layer_input = x
        for i in range(self.num_hidden_layers + 1):
            activation = np.dot(layer_input, self.weights[i]) + self.biases[i]
            layer_output = self.sigmoid(activation)
            self.activations.append(activation)
            if i == self.num_hidden_layers:
                self.output = layer_output
            else:
                self.hidden_outputs.append(layer_output)
                layer_input = layer_output
        return self.output

    def backward(self, x, y, learning_rate):
        """Backpropagate the MSE error of the last forward() call and
        update every weight and bias in place.

        Args:
            x: the same input that was passed to forward().
            y: target output, broadcastable to ``self.output``.
            learning_rate: SGD step size.
        """
        x = np.atleast_2d(x)
        y = np.atleast_2d(y)
        # 反向传播 (backpropagation): output-layer delta for MSE + sigmoid.
        error = y - self.output
        delta = error * self.sigmoid_derivative(self.output)
        self.weights[-1] += learning_rate * np.dot(self.hidden_outputs[-1].T, delta)
        self.biases[-1] += learning_rate * np.sum(delta, axis=0, keepdims=True)
        # Walk the hidden layers from last to first. Each layer's delta
        # uses that layer's OWN output (not [i+1], which overran the list),
        # and each weight update uses the layer's INPUT: the raw x for
        # layer 0, the previous hidden layer's output otherwise.
        for i in range(self.num_hidden_layers - 1, -1, -1):
            error = np.dot(delta, self.weights[i + 1].T)
            delta = error * self.sigmoid_derivative(self.hidden_outputs[i])
            layer_input = x if i == 0 else self.hidden_outputs[i - 1]
            self.weights[i] += learning_rate * np.dot(layer_input.T, delta)
            self.biases[i] += learning_rate * np.sum(delta, axis=0, keepdims=True)

    def train(self, x_train, y_train, num_epochs, learning_rate):
        """Train with per-sample SGD for ``num_epochs`` passes over the data.

        Args:
            x_train: array of shape (n_samples, num_inputs).
            y_train: array of shape (n_samples, num_outputs).
            num_epochs: number of full passes over the training set.
            learning_rate: SGD step size.
        """
        for _ in range(num_epochs):
            for j in range(x_train.shape[0]):
                x = np.atleast_2d(x_train[j])
                y = np.atleast_2d(y_train[j])
                self.forward(x)
                self.backward(x, y, learning_rate)

    def predict(self, x_test):
        """Return a list with one forward-pass output per test sample."""
        return [self.forward(np.atleast_2d(x)) for x in x_test]
```
这个神经网络有 `num_hidden_layers` 层隐藏层,每层有 `num_hidden_units` 个神经元,输入维度为 `num_inputs`,输出维度为 `num_outputs`。`forward` 方法用于执行前向传播,`backward` 方法用于执行反向传播,`train` 方法用于训练模型,`predict` 方法用于进行预测。