d = 3 # number of input nodes; l = 1 # number of output nodes; q = 2 * d + 1 # number of hidden-layer nodes, using the empirical rule 2d+1. How do I change the hidden-layer activation to Leaky ReLU?
To switch the hidden layer from the original formula to the Leaky ReLU function, you can modify the existing code directly: replace the original `sigmoid` activation with `leaky_relu` in the forward pass and use the corresponding Leaky ReLU derivative in the backward pass. The modified code is as follows:
```python
import numpy as np

def leaky_relu(x, alpha=0.01):
    """Leaky ReLU: passes positive values through, scales negatives by alpha."""
    return np.maximum(alpha * x, x)

class BPNeuralNetwork:
    def __init__(self, input_nodes, output_nodes, hidden_nodes):
        self.input_nodes = input_nodes
        self.output_nodes = output_nodes
        self.hidden_nodes = hidden_nodes
        # Weight matrices and bias vectors for the two layers
        self.w1 = np.random.randn(self.input_nodes, self.hidden_nodes)
        self.b1 = np.zeros((1, self.hidden_nodes))
        self.w2 = np.random.randn(self.hidden_nodes, self.output_nodes)
        self.b2 = np.zeros((1, self.output_nodes))

    def forward(self, X):
        self.z1 = np.dot(X, self.w1) + self.b1
        self.a1 = leaky_relu(self.z1)  # replace sigmoid with leaky_relu here
        self.z2 = np.dot(self.a1, self.w2) + self.b2
        self.y_hat = self.z2  # linear output layer

    def backward(self, X, y, learning_rate=0.1):
        m = X.shape[0]
        # Gradient of the squared-error loss w.r.t. the linear output
        delta_y_hat = self.y_hat - y
        delta_z2 = delta_y_hat
        delta_w2 = np.dot(self.a1.T, delta_z2)
        delta_b2 = np.sum(delta_z2, axis=0, keepdims=True)
        # Backpropagate through the hidden layer using the Leaky ReLU derivative:
        # 1 where z1 > 0, alpha (0.01) otherwise
        delta_a1 = np.dot(delta_z2, self.w2.T)
        delta_z1 = delta_a1 * np.where(self.z1 > 0, 1, 0.01)
        delta_w1 = np.dot(X.T, delta_z1)
        delta_b1 = np.sum(delta_z1, axis=0, keepdims=True)
        # Gradient-descent update, averaged over the batch
        self.w2 -= learning_rate * delta_w2 / m
        self.b2 -= learning_rate * delta_b2 / m
        self.w1 -= learning_rate * delta_w1 / m
        self.b1 -= learning_rate * delta_b1 / m

    def train(self, X, y, epochs=1000, learning_rate=0.1):
        for i in range(epochs):
            self.forward(X)
            self.backward(X, y, learning_rate)
            loss = np.mean(np.square(self.y_hat - y))
            if i % 100 == 0:
                print(f"epoch {i}, loss {loss}")
```
Here, `leaky_relu` serves as the hidden-layer activation function, its derivative is implemented with `np.where` (1 for positive pre-activations, `alpha` otherwise), and the hyperparameter `alpha` defaults to 0.01.
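For context, a minimal usage sketch with the node counts from the question (d = 3 inputs, q = 2d + 1 = 7 hidden nodes, l = 1 output) might look like the following; the toy data and the smaller learning rate are assumptions for illustration, not part of the original answer.

```python
import numpy as np

# Hypothetical toy regression data: 20 samples, 3 features, 1 target
X = np.random.randn(20, 3)
y = np.random.randn(20, 1)

# d = 3 inputs, l = 1 output, q = 2 * d + 1 = 7 hidden nodes (empirical rule)
net = BPNeuralNetwork(input_nodes=3, output_nodes=1, hidden_nodes=7)
net.train(X, y, epochs=1000, learning_rate=0.01)

net.forward(X)          # predictions are stored in net.y_hat
print(net.y_hat[:3])
```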