CNN implementation in NumPy
Below is a basic CNN implementation that uses the numpy library for all of its computation.
First, we import numpy and define some constants and hyperparameters:
```python
import numpy as np
# Constants and hyperparameters
np.random.seed(1)
NUM_EPOCHS = 10
LEARNING_RATE = 0.1
BATCH_SIZE = 32
# Data dimensions
INPUT_DIM = (28, 28, 1)     # inputs are 28x28 grayscale images
NUM_CLASSES = 10            # number of output classes
FILTERS = [32, 64]          # number of filters per conv layer
KERNEL_SIZES = [3, 3]       # filter size per conv layer
STRIDES = [1, 1]            # convolution strides
PADDING = ["same", "same"]  # padding mode per conv layer
POOL_SIZES = [2, 2]         # pooling window sizes
POOL_STRIDES = [2, 2]       # pooling strides (windows do not overlap)
```
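As a quick sanity check on these hyperparameters, the standard output-size formula for a convolution or pooling window is out = (in + 2*pad - f) // stride + 1. The small sketch below (the `out_size` helper is our own illustration, not part of the implementation that follows) confirms the 28 -> 14 -> 7 spatial progression that the fully connected layer relies on:
```python
def out_size(n, f, stride, pad):
    # Output width of a conv/pool window: (n + 2p - f) // s + 1
    return (n + 2 * pad - f) // stride + 1

h = out_size(INPUT_DIM[0], KERNEL_SIZES[0], STRIDES[0], pad=1)  # "same" conv: 28
h = out_size(h, POOL_SIZES[0], POOL_STRIDES[0], pad=0)          # 2x2 pool: 14
h = out_size(h, KERNEL_SIZES[1], STRIDES[1], pad=1)             # "same" conv: 14
h = out_size(h, POOL_SIZES[1], POOL_STRIDES[1], pad=0)          # 2x2 pool: 7
print(h * h * FILTERS[1])  # flattened feature size: 7 * 7 * 64 = 3136
```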
Next, we define a CNN class containing the convolution, pooling, and fully connected layers. The forward pass caches its intermediate activations so that the backward pass can reuse them.
```python
class ConvolutionalNeuralNetwork:
    def __init__(self):
        # Initialize parameters. Pooling layers have no weights, so only the
        # two convolution layers and the fully connected layer appear here.
        self.params = {}
        # Conv layer 1: weights shaped (f, f, in_channels, out_channels)
        self.params["W1"] = np.random.randn(KERNEL_SIZES[0], KERNEL_SIZES[0], INPUT_DIM[2], FILTERS[0]) \
            / np.sqrt(KERNEL_SIZES[0] * KERNEL_SIZES[0] * INPUT_DIM[2])
        self.params["b1"] = np.zeros((FILTERS[0], 1))
        # Conv layer 2
        self.params["W3"] = np.random.randn(KERNEL_SIZES[1], KERNEL_SIZES[1], FILTERS[0], FILTERS[1]) \
            / np.sqrt(KERNEL_SIZES[1] * KERNEL_SIZES[1] * FILTERS[0])
        self.params["b3"] = np.zeros((FILTERS[1], 1))
        # Fully connected layer: two 2x2 stride-2 pools shrink each spatial
        # side by a factor of 4, so the flattened size is 7 * 7 * 64 = 3136.
        flat_dim = (INPUT_DIM[0] // 4) * (INPUT_DIM[1] // 4) * FILTERS[1]
        self.params["W4"] = np.random.randn(flat_dim, NUM_CLASSES) / np.sqrt(flat_dim)
        self.params["b4"] = np.zeros((NUM_CLASSES, 1))
        self.cache = {}

    def relu(self, x):
        return np.maximum(0, x)

    def softmax(self, X):
        exp_X = np.exp(X - np.max(X))  # subtract the max for numerical stability
        return exp_X / np.sum(exp_X, axis=0)

    def conv(self, X, W, b, stride, padding):
        # X: (H, W, C_in), W: (f, f, C_in, C_out)
        n_H_prev, n_W_prev, n_C_prev = X.shape
        f, _, _, n_C = W.shape
        pad = (f - 1) // 2 if padding == "same" else 0
        X_pad = np.pad(X, [(pad, pad), (pad, pad), (0, 0)], mode="constant")
        # Output size must account for the padding
        n_H = (n_H_prev + 2 * pad - f) // stride + 1
        n_W = (n_W_prev + 2 * pad - f) // stride + 1
        Z = np.zeros((n_H, n_W, n_C))
        for h in range(n_H):
            for w in range(n_W):
                vs, hs = h * stride, w * stride
                X_slice = X_pad[vs:vs + f, hs:hs + f, :]
                for c in range(n_C):
                    Z[h, w, c] = np.sum(X_slice * W[:, :, :, c]) + b[c, 0]
        return Z

    def conv_backward(self, dZ, X, W, stride, padding):
        # Gradients of conv() w.r.t. its weights, bias, and input
        f, _, _, n_C = W.shape
        pad = (f - 1) // 2 if padding == "same" else 0
        X_pad = np.pad(X, [(pad, pad), (pad, pad), (0, 0)], mode="constant")
        dX_pad = np.zeros(X_pad.shape)
        dW = np.zeros(W.shape)
        db = np.zeros((n_C, 1))
        n_H, n_W, _ = dZ.shape
        for h in range(n_H):
            for w in range(n_W):
                vs, hs = h * stride, w * stride
                X_slice = X_pad[vs:vs + f, hs:hs + f, :]
                for c in range(n_C):
                    dW[:, :, :, c] += X_slice * dZ[h, w, c]
                    db[c] += dZ[h, w, c]
                    dX_pad[vs:vs + f, hs:hs + f, :] += W[:, :, :, c] * dZ[h, w, c]
        # Strip the padding to recover the gradient w.r.t. the unpadded input
        dX = dX_pad[pad:pad + X.shape[0], pad:pad + X.shape[1], :]
        return dW, db, dX

    def max_pool(self, X, pool_size, stride):
        n_H_prev, n_W_prev, n_C = X.shape
        f = pool_size
        n_H = (n_H_prev - f) // stride + 1
        n_W = (n_W_prev - f) // stride + 1
        Z = np.zeros((n_H, n_W, n_C))
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vs, hs = h * stride, w * stride
                    Z[h, w, c] = np.max(X[vs:vs + f, hs:hs + f, c])
        return Z

    def max_pool_backward(self, dP, A, pool_size, stride):
        # Max pooling routes the gradient only to the max position per window
        dA = np.zeros(A.shape)
        f = pool_size
        n_H, n_W, n_C = dP.shape
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vs, hs = h * stride, w * stride
                    A_slice = A[vs:vs + f, hs:hs + f, c]
                    mask = (A_slice == np.max(A_slice))
                    dA[vs:vs + f, hs:hs + f, c] += mask * dP[h, w, c]
        return dA

    def forward(self, X):
        # Conv layer 1 + ReLU: 28x28x1 -> 28x28x32
        Z1 = self.conv(X, self.params["W1"], self.params["b1"], STRIDES[0], PADDING[0])
        A1 = self.relu(Z1)
        # Pool layer 1: 28x28x32 -> 14x14x32
        P1 = self.max_pool(A1, POOL_SIZES[0], POOL_STRIDES[0])
        # Conv layer 2 + ReLU: 14x14x32 -> 14x14x64
        Z2 = self.conv(P1, self.params["W3"], self.params["b3"], STRIDES[1], PADDING[1])
        A2 = self.relu(Z2)
        # Pool layer 2: 14x14x64 -> 7x7x64
        P2 = self.max_pool(A2, POOL_SIZES[1], POOL_STRIDES[1])
        # Flatten: 7x7x64 -> (3136, 1)
        F = P2.reshape((-1, 1))
        # Fully connected layer + softmax
        Z3 = np.dot(self.params["W4"].T, F) + self.params["b4"]
        A3 = self.softmax(Z3)
        # Cache the intermediates the backward pass needs
        self.cache = {"X": X, "A1": A1, "P1": P1, "A2": A2, "P2": P2, "F": F}
        return A3

    def backward(self, Y_hat, Y):
        X, A1, P1 = self.cache["X"], self.cache["A1"], self.cache["P1"]
        A2, P2, F = self.cache["A2"], self.cache["P2"], self.cache["F"]
        # Softmax + cross-entropy gradient w.r.t. the logits
        dZ3 = Y_hat - Y
        dW4 = np.dot(F, dZ3.T)
        db4 = dZ3
        dF = np.dot(self.params["W4"], dZ3)
        dP2 = dF.reshape(P2.shape)
        # Backprop through pool 2, ReLU 2, and conv 2
        dA2 = self.max_pool_backward(dP2, A2, POOL_SIZES[1], POOL_STRIDES[1])
        dZ2 = dA2 * (A2 > 0)  # ReLU derivative
        dW3, db3, dP1 = self.conv_backward(dZ2, P1, self.params["W3"], STRIDES[1], PADDING[1])
        # Backprop through pool 1, ReLU 1, and conv 1
        dA1 = self.max_pool_backward(dP1, A1, POOL_SIZES[0], POOL_STRIDES[0])
        dZ1 = dA1 * (A1 > 0)
        dW1, db1, _ = self.conv_backward(dZ1, X, self.params["W1"], STRIDES[0], PADDING[0])
        # Per-example gradient-descent step, scaled by BATCH_SIZE so that one
        # pass over a batch roughly averages the updates
        self.params["W1"] -= LEARNING_RATE * dW1 / BATCH_SIZE
        self.params["b1"] -= LEARNING_RATE * db1 / BATCH_SIZE
        self.params["W3"] -= LEARNING_RATE * dW3 / BATCH_SIZE
        self.params["b3"] -= LEARNING_RATE * db3 / BATCH_SIZE
        self.params["W4"] -= LEARNING_RATE * dW4 / BATCH_SIZE
        self.params["b4"] -= LEARNING_RATE * db4 / BATCH_SIZE
```
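A hand-written backward pass like this is easy to get wrong, so a finite-difference gradient check is worth running before training. The sketch below is our own addition (the names `numerical_grad` and `check_model` are illustrative, not part of the original code): it perturbs one entry of W4 and compares the numerical gradient of the cross-entropy loss against the analytic value np.dot(F, dZ3.T)[0, 0] used in backward(). The two should agree to several decimal places.
```python
def numerical_grad(model, X, Y, name, idx, eps=1e-5):
    # Central-difference estimate of dLoss/dparam for a single weight entry
    def loss():
        Y_hat = model.forward(X)
        return float(-np.sum(Y * np.log(Y_hat + 1e-12)))
    orig = model.params[name][idx]
    model.params[name][idx] = orig + eps
    loss_plus = loss()
    model.params[name][idx] = orig - eps
    loss_minus = loss()
    model.params[name][idx] = orig  # restore the weight
    return (loss_plus - loss_minus) / (2 * eps)

check_model = ConvolutionalNeuralNetwork()
x = np.random.randn(*INPUT_DIM)
y = np.zeros((NUM_CLASSES, 1)); y[3] = 1.0  # arbitrary one-hot label
# Analytic gradient of W4[0, 0], computed from the cached forward pass
Y_hat = check_model.forward(x)
analytic = np.dot(check_model.cache["F"], (Y_hat - y).T)[0, 0]
numeric = numerical_grad(check_model, x, y, "W4", (0, 0))
print("analytic %.8f vs numeric %.8f" % (analytic, numeric))
```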
Finally, we generate random training data (a stand-in for a real dataset such as MNIST) and train the network. Since forward and backward operate on a single example at a time, the loop simply iterates over the examples; BATCH_SIZE only scales each update step:
```python
# Random training data as a stand-in for a real dataset; kept small because
# the looped implementation is slow
x_train = np.random.randn(100, INPUT_DIM[0], INPUT_DIM[1], INPUT_DIM[2])
y_train = np.random.randint(0, NUM_CLASSES, (100,))
y_onehot = np.eye(NUM_CLASSES)[y_train]  # one-hot labels for the gradient
# Initialize the model
model = ConvolutionalNeuralNetwork()
# Train the model, tracking the mean cross-entropy loss per epoch
for epoch in range(NUM_EPOCHS):
    losses = []
    for i in range(len(x_train)):
        Y = y_onehot[i].reshape(-1, 1)
        Y_hat = model.forward(x_train[i])
        losses.append(float(-np.log(Y_hat[y_train[i], 0] + 1e-12)))
        model.backward(Y_hat, Y)
    print("Epoch %d: loss_train = %f" % (epoch, np.mean(losses)))
```
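After training, inference is just a forward pass followed by an argmax over the class probabilities. A minimal evaluation sketch (the `accuracy` helper is our own addition; on the random labels above it should hover near chance level, about 10%):
```python
def accuracy(model, xs, ys):
    # Fraction of examples whose argmax prediction matches the label
    correct = 0
    for x, y in zip(xs, ys):
        pred = int(np.argmax(model.forward(x)))
        correct += int(pred == int(y))
    return correct / len(xs)

print("train accuracy: %.3f" % accuracy(model, x_train[:50], y_train[:50]))
```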
The code above is only a reference implementation; the nested Python loops make it far too slow for real use. In practice you would vectorize the convolutions (for example with an im2col transformation, sketched below) and train on a real dataset.
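As one example of such an optimization, the convolution loops can be replaced by an im2col transformation: every receptive field is unrolled into a row of a matrix, turning the whole convolution into a single matrix multiplication that numpy executes far faster than Python loops. A minimal sketch for stride 1 and no padding (`im2col_conv` is our own illustration, not part of the code above):
```python
def im2col_conv(X, W, b):
    # X: (H, W, C_in); W: (f, f, C_in, C_out); valid convolution, stride 1
    H, Wd, C_in = X.shape
    f, _, _, C_out = W.shape
    n_H, n_W = H - f + 1, Wd - f + 1
    # Unroll every f x f x C_in receptive field into one row
    cols = np.zeros((n_H * n_W, f * f * C_in))
    for h in range(n_H):
        for w in range(n_W):
            cols[h * n_W + w] = X[h:h + f, w:w + f, :].ravel()
    # One matrix multiplication replaces the per-position, per-filter loops
    out = cols @ W.reshape(f * f * C_in, C_out) + b.ravel()
    return out.reshape(n_H, n_W, C_out)
```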