Deep Reinforcement Learning Code: GPT
Below is a PyTorch code example of a GPT-2-style Transformer model, covering both the Transformer Encoder and the Transformer Decoder. (GPT-2 itself is decoder-only; the encoder is included here for completeness.)
First, import the required libraries:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
```
Next, define a helper function to initialize the weights:
```python
def init_weights(module):
    # Linear layers: normal(0, 0.02) weights and zero bias (GPT-2-style initialization).
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=0.02)
        if module.bias is not None:
            module.bias.data.zero_()
    # LayerNorm: zero bias, unit scale.
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
```
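This initializer is applied recursively via `nn.Module.apply`. A minimal usage sketch (the model and sizes here are illustrative, not part of the original post):
```python
# Hypothetical usage: nn.Module.apply walks every submodule recursively
# and calls init_weights on each one.
model = nn.Sequential(nn.Linear(768, 768), nn.LayerNorm(768))
model.apply(init_weights)
```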
Next, define the Transformer Encoder:
```python
class TransformerEncoder(nn.Module):
    def __init__(self, num_layers, hidden_size, num_heads, dropout_rate):
        super(TransformerEncoder, self).__init__()
        self.num_layers = num_layers
        self.layers = nn.ModuleList(
            [TransformerEncoderLayer(hidden_size, num_heads, dropout_rate) for _ in range(num_layers)]
        )
        # Final LayerNorm applied after the whole stack (pre-norm architecture).
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, x, mask=None):
        # x: (batch, seq_len, hidden_size)
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)
```
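A quick shape check of the encoder stack, assuming the layer, attention, and feed-forward classes defined in the rest of this post are already in scope (sizes are illustrative):
```python
# Illustrative sizes; requires TransformerEncoderLayer, MultiHeadAttention,
# and FeedForward from the sections below.
encoder = TransformerEncoder(num_layers=6, hidden_size=768, num_heads=12, dropout_rate=0.1)
x = torch.randn(2, 10, 768)    # (batch, seq_len, hidden_size)
print(encoder(x).shape)        # torch.Size([2, 10, 768]) -- shape is preserved
```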
The Transformer Encoder consists of multiple Transformer Encoder Layers; each layer contains a self-attention sub-layer and a feed-forward sub-layer, each wrapped with pre-layer normalization and a residual connection. The Encoder Layer is implemented as follows:
```python
class TransformerEncoderLayer(nn.Module):
    def __init__(self, hidden_size, num_heads, dropout_rate):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attention = MultiHeadAttention(hidden_size, num_heads, dropout_rate)
        self.feed_forward = FeedForward(hidden_size, dropout_rate)
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x, mask=None):
        # Self-attention sub-layer: pre-norm, attention, dropout, residual.
        residual = x
        x = self.norm1(x)
        x = self.self_attention(x, x, x, mask)
        x = residual + self.dropout(x)
        # Feed-forward sub-layer: pre-norm, MLP, dropout, residual.
        residual = x
        x = self.norm2(x)
        x = self.feed_forward(x)
        x = residual + self.dropout(x)
        return x
```
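If the input sequences are padded, a padding mask can be passed as `mask` so attention ignores the pad positions. A minimal sketch, assuming the pad token id is 0 (a hypothetical convention, not something fixed by the code above):
```python
# Hypothetical padding mask: 1 = real token, 0 = padding (pad id assumed to be 0 here).
token_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 2, 0, 0, 0]])          # (batch, seq_len)
pad_mask = (token_ids != 0).unsqueeze(1)             # (batch, 1, seq_len)
# Inside MultiHeadAttention (defined later) the mask is unsqueezed once more and
# broadcast over the head and query dimensions, so padded keys receive -1e9 scores.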
Next, define the Transformer Decoder:
```python
class TransformerDecoder(nn.Module):
    def __init__(self, num_layers, hidden_size, num_heads, dropout_rate):
        super(TransformerDecoder, self).__init__()
        self.num_layers = num_layers
        self.layers = nn.ModuleList(
            [TransformerDecoderLayer(hidden_size, num_heads, dropout_rate) for _ in range(num_layers)]
        )
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, x, memory, src_mask=None, tgt_mask=None):
        # x: target embeddings (batch, tgt_len, hidden); memory: encoder output (batch, src_len, hidden)
        for layer in self.layers:
            x = layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)
```
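The `tgt_mask` in the forward pass above is a causal (look-ahead) mask, so each position can only attend to itself and earlier positions. One way to build it, with a shape compatible with the MultiHeadAttention defined later (a sketch; the helper name is our own):
```python
def make_causal_mask(batch_size, seq_len):
    # Lower-triangular matrix: 1.0 = attention allowed, 0.0 = blocked.
    causal = torch.tril(torch.ones(seq_len, seq_len))
    # (batch, seq_len, seq_len); MultiHeadAttention adds the head dimension itself.
    return causal.unsqueeze(0).expand(batch_size, -1, -1)

tgt_mask = make_causal_mask(batch_size=2, seq_len=10)
```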
Like the encoder, the Transformer Decoder is built from multiple Transformer Decoder Layers; each layer adds a cross-attention (encoder-decoder attention) sub-layer on top of masked self-attention and the feed-forward network. The Decoder Layer is implemented as follows:
```python
class TransformerDecoderLayer(nn.Module):
    def __init__(self, hidden_size, num_heads, dropout_rate):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attention = MultiHeadAttention(hidden_size, num_heads, dropout_rate)
        self.src_attention = MultiHeadAttention(hidden_size, num_heads, dropout_rate)
        self.feed_forward = FeedForward(hidden_size, dropout_rate)
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)
        self.norm3 = nn.LayerNorm(hidden_size)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x, memory, src_mask=None, tgt_mask=None):
        # Masked self-attention over the target sequence.
        residual = x
        x = self.norm1(x)
        x = self.self_attention(x, x, x, tgt_mask)
        x = residual + self.dropout(x)
        # Cross-attention over the encoder output (memory).
        residual = x
        x = self.norm2(x)
        x = self.src_attention(x, memory, memory, src_mask)
        x = residual + self.dropout(x)
        # Feed-forward sub-layer.
        residual = x
        x = self.norm3(x)
        x = self.feed_forward(x)
        x = residual + self.dropout(x)
        return x
```
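Putting the decoder pieces together, a stack consumes the target embeddings, the encoder memory, and the causal mask sketched above. Illustrative sizes only, and it assumes the attention and feed-forward classes defined next are in scope:
```python
# Illustrative end-to-end shape check for the decoder stack.
decoder = TransformerDecoder(num_layers=6, hidden_size=768, num_heads=12, dropout_rate=0.1)
tgt = torch.randn(2, 10, 768)      # target embeddings: (batch, tgt_len, hidden_size)
memory = torch.randn(2, 12, 768)   # encoder output:    (batch, src_len, hidden_size)
tgt_mask = make_causal_mask(batch_size=2, seq_len=10)
out = decoder(tgt, memory, src_mask=None, tgt_mask=tgt_mask)
print(out.shape)                   # torch.Size([2, 10, 768])
```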
Finally, define the MultiHeadAttention and FeedForward layers used by both stacks:
```python
class MultiHeadAttention(nn.Module):
    def __init__(self, hidden_size, num_heads, dropout_rate):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.head_size = hidden_size // num_heads
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.output = nn.Linear(hidden_size, hidden_size)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)
        # Project and split into heads: (batch, num_heads, seq_len, head_size)
        query = self.query(query).view(batch_size, -1, self.num_heads, self.head_size).transpose(1, 2)
        key = self.key(key).view(batch_size, -1, self.num_heads, self.head_size).transpose(1, 2)
        value = self.value(value).view(batch_size, -1, self.num_heads, self.head_size).transpose(1, 2)
        # Scaled dot-product attention scores: (batch, num_heads, q_len, k_len)
        scores = query.matmul(key.transpose(-2, -1)) / self.head_size ** 0.5
        if mask is not None:
            # Broadcast the mask over heads; masked (0) positions get a large negative score.
            mask = mask.unsqueeze(1)
            scores = scores.masked_fill(mask == 0, -1e9)
        attn_weights = self.dropout(F.softmax(scores, dim=-1))
        attn_output = attn_weights.matmul(value)
        # Merge the heads back: (batch, q_len, hidden_size)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.head_size)
        return self.output(attn_output)


class FeedForward(nn.Module):
    def __init__(self, hidden_size, dropout_rate):
        super(FeedForward, self).__init__()
        # Position-wise feed-forward network with a 4x hidden expansion.
        self.fc1 = nn.Linear(hidden_size, hidden_size * 4)
        self.fc2 = nn.Linear(hidden_size * 4, hidden_size)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        return self.fc2(x)
```
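A quick sanity check of the attention and feed-forward shapes (illustrative sizes):
```python
# Both blocks preserve the (batch, seq_len, hidden_size) shape.
mha = MultiHeadAttention(hidden_size=768, num_heads=12, dropout_rate=0.1)
x = torch.randn(2, 10, 768)
print(mha(x, x, x).shape)   # torch.Size([2, 10, 768])

ff = FeedForward(hidden_size=768, dropout_rate=0.1)
print(ff(x).shape)          # torch.Size([2, 10, 768])
```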
The above is a simple GPT-2-style Transformer implementation that can be modified and optimized as needed; note that it stops at the Transformer stacks and does not yet include token/position embeddings or an output head.
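As one possible way to turn these building blocks into a GPT-style language model, the sketch below adds embeddings and an output projection and reuses the `make_causal_mask` helper from earlier. All class names, sizes, and design choices here are our own illustrative assumptions, not part of the original post:
```python
class MiniGPT(nn.Module):
    """Illustrative decoder-only GPT-style wrapper: the encoder stack plus a causal mask."""
    def __init__(self, vocab_size, max_len, num_layers, hidden_size, num_heads, dropout_rate):
        super().__init__()
        self.tok_emb = nn.Embedding(vocab_size, hidden_size)
        self.pos_emb = nn.Embedding(max_len, hidden_size)
        self.blocks = TransformerEncoder(num_layers, hidden_size, num_heads, dropout_rate)
        self.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)
        self.apply(init_weights)   # reuse the initializer defined at the top

    def forward(self, token_ids):
        batch_size, seq_len = token_ids.shape
        pos = torch.arange(seq_len, device=token_ids.device).unsqueeze(0)
        x = self.tok_emb(token_ids) + self.pos_emb(pos)
        # The causal mask makes the (otherwise bidirectional) stack autoregressive.
        mask = make_causal_mask(batch_size, seq_len).to(token_ids.device)
        x = self.blocks(x, mask)
        return self.lm_head(x)     # logits: (batch, seq_len, vocab_size)

# Smoke test with small, arbitrary hyperparameters.
model = MiniGPT(vocab_size=50257, max_len=1024, num_layers=2,
                hidden_size=256, num_heads=8, dropout_rate=0.1)
logits = model(torch.randint(0, 50257, (2, 16)))
print(logits.shape)                # torch.Size([2, 16, 50257])
```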