Write a five-layer artificial neural network, with 12, 10, 10, 10, and 8 neurons per layer
Sure, here is a Python implementation of a five-layer artificial neural network with 12, 10, 10, 10, and 8 neurons per layer. It uses sigmoid activations throughout and is trained with full-batch gradient descent on a cross-entropy cost:
```python
import numpy as np

# Define the network architecture: 12 inputs, three hidden layers of 10, and 8 outputs
layer_sizes = [12, 10, 10, 10, 8]

# Initialize the weights and biases for each layer
def initialize_parameters(layer_sizes):
    parameters = {}
    for l in range(1, len(layer_sizes)):
        parameters['W' + str(l)] = np.random.randn(layer_sizes[l], layer_sizes[l-1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_sizes[l], 1))
    return parameters

# Sigmoid activation function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Forward propagation. Every layer uses a sigmoid activation, so the output
# layer can share the loop. Each cache stores (A_prev, Z), because the
# backward pass needs the previous layer's activations to compute dW.
def forward_propagation(X, parameters):
    caches = []
    A = X
    L = len(parameters) // 2
    for l in range(1, L + 1):
        A_prev = A
        Z = np.dot(parameters['W' + str(l)], A_prev) + parameters['b' + str(l)]
        A = sigmoid(Z)
        caches.append((A_prev, Z))
    return A, caches

# Cross-entropy cost, averaged over the m training examples
def compute_cost(AL, Y):
    m = Y.shape[1]
    AL = np.clip(AL, 1e-8, 1 - 1e-8)  # guard against log(0)
    cost = -np.sum(Y * np.log(AL) + (1 - Y) * np.log(1 - AL)) / m
    return cost

# Backward propagation; needs `parameters` for the weight matrices that
# propagate the gradient back to the previous layer
def backward_propagation(AL, Y, caches, parameters):
    grads = {}
    L = len(caches)
    m = AL.shape[1]
    AL = np.clip(AL, 1e-8, 1 - 1e-8)  # keep the divisions finite
    dA = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    for l in reversed(range(1, L + 1)):
        A_prev, Z = caches[l-1]
        s = sigmoid(Z)
        dZ = dA * s * (1 - s)  # sigmoid'(Z) = sigmoid(Z) * (1 - sigmoid(Z))
        grads['dW' + str(l)] = np.dot(dZ, A_prev.T) / m
        grads['db' + str(l)] = np.sum(dZ, axis=1, keepdims=True) / m
        dA = np.dot(parameters['W' + str(l)].T, dZ)
    return grads

# Gradient-descent parameter update
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2
    for l in range(1, L + 1):
        parameters['W' + str(l)] -= learning_rate * grads['dW' + str(l)]
        parameters['b' + str(l)] -= learning_rate * grads['db' + str(l)]
    return parameters

# Train the model with full-batch gradient descent
def train_model(X, Y, layer_sizes, num_iterations, learning_rate):
    parameters = initialize_parameters(layer_sizes)
    for i in range(num_iterations):
        AL, caches = forward_propagation(X, parameters)
        cost = compute_cost(AL, Y)
        grads = backward_propagation(AL, Y, caches, parameters)
        parameters = update_parameters(parameters, grads, learning_rate)
        if i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
    return parameters

# Predict by thresholding the output activations at 0.5
def predict(X, parameters):
    AL, _ = forward_propagation(X, parameters)
    predictions = (AL > 0.5)
    return predictions

# Generate a random toy dataset: 100 examples with 12 features and 8 binary labels
X = np.random.randn(12, 100)
Y = np.random.randint(2, size=(8, 100))

# Train the model
parameters = train_model(X, Y, layer_sizes, 10000, 0.01)

# Test the model
predictions = predict(X, parameters)
```
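
Backward passes like this are easy to get subtly wrong, so it is worth validating with a finite-difference gradient check. The snippet below is a minimal sanity-check sketch (not part of the original answer); the choice of the weight entry `W1[0, 0]` and the step size `eps` is arbitrary. It compares the numeric slope of the cost with the analytic gradient from `backward_propagation`:

```python
# Finite-difference gradient check on one weight entry (a sanity-check
# sketch; W1[0, 0] and eps = 1e-5 are arbitrary choices)
eps = 1e-5
AL, caches = forward_propagation(X, parameters)
grads = backward_propagation(AL, Y, caches, parameters)

parameters['W1'][0, 0] += eps
cost_plus = compute_cost(forward_propagation(X, parameters)[0], Y)
parameters['W1'][0, 0] -= 2 * eps
cost_minus = compute_cost(forward_propagation(X, parameters)[0], Y)
parameters['W1'][0, 0] += eps  # restore the original weight

numeric_grad = (cost_plus - cost_minus) / (2 * eps)
print(numeric_grad, grads['dW1'][0, 0])  # the two values should closely agree
```

If the two printed numbers diverge, the usual culprits are reading the wrong cache in the backward loop or a missing 1/m factor.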