Help me write a Python version of a convolutional neural network
### Answer 1:
Sure, here is a simple implementation of a convolutional neural network in Python:
```python
import numpy as np

class Conv2D:
    def __init__(self, in_channels, out_channels, kernel_size):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # He initialization
        self.weights = np.random.randn(out_channels, in_channels, kernel_size, kernel_size) \
            * np.sqrt(2.0 / (in_channels * kernel_size * kernel_size))
        self.bias = np.zeros(out_channels)

    def forward(self, x):
        batch_size, in_channels, height, width = x.shape
        out_height = height - self.kernel_size + 1
        out_width = width - self.kernel_size + 1
        # x_col: (C*k*k, out_height*out_width*batch_size), columns ordered (h, w, batch)
        self.x_col = self.im2col(x)
        self.w_col = self.weights.reshape(self.out_channels, -1)
        out = np.dot(self.w_col, self.x_col) + self.bias.reshape(-1, 1)
        out = out.reshape(self.out_channels, out_height, out_width, batch_size)
        out = out.transpose(3, 0, 1, 2)  # -> (batch_size, out_channels, out_height, out_width)
        return out

    def backward(self, dout):
        dout = dout.transpose(1, 2, 3, 0).reshape(self.out_channels, -1)
        dw = np.dot(dout, self.x_col.T).reshape(self.weights.shape)
        db = np.sum(dout, axis=1)
        dx_col = np.dot(self.w_col.T, dout)
        dx = self.col2im(dx_col)
        return dx, dw, db

    def im2col(self, x):
        batch_size, in_channels, height, width = x.shape
        self.col_height = height - self.kernel_size + 1
        self.col_width = width - self.kernel_size + 1
        col = np.zeros((batch_size, in_channels, self.kernel_size, self.kernel_size,
                        self.col_height, self.col_width))
        for i in range(self.kernel_size):
            i_max = i + self.col_height
            for j in range(self.kernel_size):
                j_max = j + self.col_width
                col[:, :, i, j, :, :] = x[:, :, i:i_max, j:j_max]
        # -> (C*k*k, col_height*col_width*batch_size)
        col = col.transpose(1, 2, 3, 4, 5, 0).reshape(
            in_channels * self.kernel_size * self.kernel_size, -1)
        return col

    def col2im(self, col):
        # Inverse of im2col: scatter-add column gradients back to image positions
        col = col.reshape(self.in_channels, self.kernel_size, self.kernel_size,
                          self.col_height, self.col_width, -1)
        col = col.transpose(5, 0, 1, 2, 3, 4)  # -> (batch, C, k, k, col_height, col_width)
        batch_size = col.shape[0]
        x = np.zeros((batch_size, self.in_channels,
                      self.col_height + self.kernel_size - 1,
                      self.col_width + self.kernel_size - 1))
        for i in range(self.kernel_size):
            i_max = i + self.col_height
            for j in range(self.kernel_size):
                j_max = j + self.col_width
                x[:, :, i:i_max, j:j_max] += col[:, :, i, j, :, :]
        return x
class MaxPool2D:
    def __init__(self, pool_size):
        self.pool_size = pool_size

    def forward(self, x):
        batch_size, in_channels, height, width = x.shape
        out_height = height // self.pool_size
        out_width = width // self.pool_size
        x_reshaped = x.reshape(batch_size, in_channels,
                               out_height, self.pool_size,
                               out_width, self.pool_size)
        out = np.max(x_reshaped, axis=(3, 5))
        # Mask marks the positions of the maxima so the gradient is routed back to them
        self.mask = (x_reshaped == out[:, :, :, np.newaxis, :, np.newaxis])
        return out

    def backward(self, dout):
        # Broadcast the upstream gradient over each pooling window, keep it only at the maxima
        dx = self.mask * dout[:, :, :, np.newaxis, :, np.newaxis]
        batch_size, in_channels, out_height, p, out_width, _ = dx.shape
        dx = dx.reshape(batch_size, in_channels, out_height * p, out_width * p)
        return dx
class Flatten:
    def forward(self, x):
        self.input_shape = x.shape
        return x.reshape(x.shape[0], -1)

    def backward(self, dout):
        return dout.reshape(self.input_shape)
class Dense:
    def __init__(self, in_features, out_features):
        self.in_features = in_features
        self.out_features = out_features
        # He initialization
        self.weights = np.random.randn(in_features, out_features) * np.sqrt(2.0 / in_features)
        self.bias = np.zeros(out_features)

    def forward(self, x):
        self.x = x
        return np.dot(x, self.weights) + self.bias

    def backward(self, dout):
        dw = np.dot(self.x.T, dout)
        db = np.sum(dout, axis=0)
        dx = np.dot(dout, self.weights.T)
        return dx, dw, db
class ReLU:
    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        return dout
class SoftmaxWithCrossEntropy:
    def forward(self, x, y):
        self.batch_size = x.shape[0]
        # Shift for numerical stability before exponentiating
        x_shift = x - np.max(x, axis=1, keepdims=True)
        exp_x = np.exp(x_shift)
        self.softmax_output = exp_x / np.sum(exp_x, axis=1, keepdims=True)
        # y is expected to be one-hot encoded
        loss = -np.sum(y * np.log(self.softmax_output + 1e-9)) / self.batch_size
        return loss

    def backward(self, y):
        dx = (self.softmax_output - y) / self.batch_size
        return dx
```
This implementation covers the common building blocks: a convolution layer, a max-pooling layer, a fully connected layer, the ReLU activation, and a softmax cross-entropy loss. You can use it to build and train a convolutional neural network, as in the sketch below.
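As a usage sketch (not part of the original answer), here is one way to wire these layers into a tiny network and run a few plain-SGD steps on random data; the layer sizes, batch size, and learning rate are arbitrary choices for illustration:
```python
import numpy as np

# Assumed layer stack for 8x8 single-channel inputs and 10 classes
conv = Conv2D(in_channels=1, out_channels=4, kernel_size=3)   # -> (N, 4, 6, 6)
relu = ReLU()
pool = MaxPool2D(pool_size=2)                                 # -> (N, 4, 3, 3)
flatten = Flatten()
dense = Dense(in_features=4 * 3 * 3, out_features=10)
loss_fn = SoftmaxWithCrossEntropy()

# Random toy batch: 8 images with one-hot labels
x = np.random.randn(8, 1, 8, 8)
y = np.eye(10)[np.random.randint(0, 10, size=8)]

lr = 0.01
for step in range(5):
    # Forward pass
    out = pool.forward(relu.forward(conv.forward(x)))
    logits = dense.forward(flatten.forward(out))
    loss = loss_fn.forward(logits, y)

    # Backward pass, in reverse layer order
    dout = loss_fn.backward(y)
    dout, dw_fc, db_fc = dense.backward(dout)
    dout = flatten.backward(dout)
    dout = relu.backward(pool.backward(dout))
    _, dw_conv, db_conv = conv.backward(dout)

    # Plain SGD update
    dense.weights -= lr * dw_fc
    dense.bias -= lr * db_fc
    conv.weights -= lr * dw_conv
    conv.bias -= lr * db_conv
    print(f"step {step}: loss = {loss:.4f}")
```
On this toy setup the printed loss should drift downward over the five steps, which is a quick sanity check that the forward and backward passes agree.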
### Answer 2:
Of course I can help you write a convolutional neural network in Python. Here is a simple example:
```python
import numpy as np
# Convolutional layer
class ConvLayer:
    def __init__(self, input_shape, num_filters, filter_size, stride):
        self.input_shape = input_shape  # (channels, height, width)
        self.num_filters = num_filters
        self.filter_size = filter_size
        self.stride = stride
        # One filter per output channel, spanning all input channels
        self.filters = np.random.randn(num_filters, input_shape[0],
                                       filter_size, filter_size) / (filter_size ** 2)
        self.output_shape = (num_filters,
                             (input_shape[1] - filter_size) // stride + 1,
                             (input_shape[2] - filter_size) // stride + 1)

    def forward(self, x):
        self.x = x
        batch_size, num_input_channels, input_height, input_width = x.shape
        output_height = (input_height - self.filter_size) // self.stride + 1
        output_width = (input_width - self.filter_size) // self.stride + 1
        output = np.zeros((batch_size, self.num_filters, output_height, output_width))
        # Naive sliding-window convolution (slow, but easy to follow)
        for i in range(batch_size):
            for f in range(self.num_filters):
                for j in range(0, input_height - self.filter_size + 1, self.stride):
                    for k in range(0, input_width - self.filter_size + 1, self.stride):
                        output[i, f, j // self.stride, k // self.stride] = np.sum(
                            x[i, :, j:j + self.filter_size, k:k + self.filter_size] * self.filters[f])
        return output
# Max-pooling layer
class PoolingLayer:
    def __init__(self, pool_size, stride):
        self.pool_size = pool_size
        self.stride = stride

    def forward(self, x):
        self.x = x
        batch_size, num_channels, input_height, input_width = x.shape
        output_height = (input_height - self.pool_size) // self.stride + 1
        output_width = (input_width - self.pool_size) // self.stride + 1
        output = np.zeros((batch_size, num_channels, output_height, output_width))
        for i in range(batch_size):
            for c in range(num_channels):
                for j in range(0, input_height - self.pool_size + 1, self.stride):
                    for k in range(0, input_width - self.pool_size + 1, self.stride):
                        output[i, c, j // self.stride, k // self.stride] = np.max(
                            x[i, c, j:j + self.pool_size, k:k + self.pool_size])
        return output
# Fully connected layer
class FullyConnectedLayer:
    def __init__(self, input_size, output_size):
        self.weights = np.random.randn(input_size, output_size) / np.sqrt(input_size)
        self.bias = np.zeros(output_size)

    def forward(self, x):
        return np.dot(x, self.weights) + self.bias

# A simple convolutional network: conv -> pool -> conv -> pool -> fc
class ConvNet:
    def __init__(self):
        self.conv1 = ConvLayer(input_shape=(3, 32, 32), num_filters=16, filter_size=5, stride=1)   # -> (16, 28, 28)
        self.pool1 = PoolingLayer(pool_size=2, stride=2)                                           # -> (16, 14, 14)
        self.conv2 = ConvLayer(input_shape=(16, 14, 14), num_filters=32, filter_size=3, stride=1)  # -> (32, 12, 12)
        self.pool2 = PoolingLayer(pool_size=2, stride=2)                                           # -> (32, 6, 6)
        self.fc = FullyConnectedLayer(input_size=32 * 6 * 6, output_size=10)

    def forward(self, x):
        out = self.conv1.forward(x)
        out = self.pool1.forward(out)
        out = self.conv2.forward(out)
        out = self.pool2.forward(out)
        out = out.reshape(out.shape[0], -1)  # flatten
        out = self.fc.forward(out)
        return out

# Quick smoke test
x = np.random.randn(10, 3, 32, 32)
net = ConvNet()
output = net.forward(x)
print(output.shape)  # (10, 10)
```
The code above is a simple convolutional neural network with convolution and pooling layers and their forward passes, which you can modify and extend to fit your needs. Note that this is only a minimal example: a real application would typically also need activation functions, a loss, and a backward pass for training.
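As one illustrative extension (not part of the original answer), the network above only produces raw scores, so you could add a numerically stable softmax on top to turn them into class probabilities:
```python
# Hypothetical extension: convert the raw scores from net.forward(x)
# above into per-class probabilities
def softmax(logits):
    shifted = logits - np.max(logits, axis=1, keepdims=True)  # avoid overflow in exp
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=1, keepdims=True)

probs = softmax(output)          # output from the smoke test above, shape (10, 10)
pred = np.argmax(probs, axis=1)  # predicted class index per sample
print(pred.shape)                # (10,)
```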