Please complete the following code:

class AttModel(nn.Module):
    def __init__(self, n_input, n_hidden, seq_len):
        """
        n_input: vocabulary size
        n_hidden: dimension of the hidden state
        seq_len: length of the input text
        """
        super(Model, self).__init__()
        # store the constructor arguments
        self.hidden_dim = n_hidden
        self.input_size = n_input
        self.output_size = n_input
        self.n_layers = 1
        # Maximum number of RNN timesteps used by the global attention mechanism,
        # i.e. how many timesteps the current timestep computes alignment weights against
        self.max_length = 10
        # define the layers
        # RNN layer, see https://pytorch.org/docs/stable/generated/torch.nn.RNN.html
        self.rnn = nn.RNN(self.input_size, self.hidden_dim, self.n_layers, batch_first=True)
        # attention layer: computes the alignment score
        self.attn = torch.nn.Linear(in_features=, out_features=, bias=False)
        # attention layer: transforms the concatenation of c_t and h_t
        self.w_c = torch.nn.Linear(in_features=, out_features=)
        # fully connected layer, see https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
        self.fc = nn.Linear()
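For reference, one common way to fill in the blank layer sizes is sketched below, assuming Luong-style "general" global attention as the comments in the skeleton suggest (score(h_t, h_s) = h_t^T W_a h_s, followed by a transform of the concatenated [c_t; h_t]); the class name GlobalAttModel and the exact dimensions are assumptions, not part of the original question.

import torch
import torch.nn as nn

class GlobalAttModel(nn.Module):
    """Hypothetical completion of the skeleton above."""
    def __init__(self, n_input, n_hidden, seq_len):
        super(GlobalAttModel, self).__init__()  # the skeleton's super(Model, ...) should name the class itself
        self.hidden_dim = n_hidden
        self.input_size = n_input
        self.output_size = n_input
        self.n_layers = 1
        self.max_length = 10
        self.rnn = nn.RNN(self.input_size, self.hidden_dim, self.n_layers, batch_first=True)
        # score(h_t, h_s) = h_t^T W_a h_s, so W_a maps hidden_dim -> hidden_dim
        self.attn = nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=False)
        # [c_t; h_t] has 2 * hidden_dim features and is projected back to hidden_dim
        self.w_c = nn.Linear(in_features=self.hidden_dim * 2, out_features=self.hidden_dim)
        # final projection from the attentional hidden state to the vocabulary
        self.fc = nn.Linear(self.hidden_dim, self.output_size)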
import torch
import torch.nn as nn

class AttModel(nn.Module):
    def __init__(self, n_input, n_hidden, seq_len):
        """
        n_input: vocabulary size
        n_hidden: dimension of the hidden state
        seq_len: length of the input text
        """
        super(AttModel, self).__init__()  # correct way to call the parent class constructor
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.seq_len = seq_len
        self.linear = nn.Linear(n_hidden, n_hidden)
        self.encoder = nn.Embedding(n_input, n_hidden)
        self.attention = nn.Linear(n_hidden, 1)

    def forward(self, x):
        x = self.encoder(x)                             # (batch, seq_len) -> (batch, seq_len, n_hidden)
        x = x.view(-1, self.seq_len, self.n_hidden)
        e = torch.tanh(self.linear(x))                  # per-timestep energies
        a = torch.softmax(self.attention(e), dim=1)     # attention weights over the sequence dimension
        h = torch.bmm(a.permute(0, 2, 1), x).squeeze()  # weighted sum of timesteps -> (batch, n_hidden)
        return h
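A minimal usage sketch of the answer's AttModel; the vocabulary size, hidden size, batch size, and sequence length below are made-up values for illustration.

model = AttModel(n_input=1000, n_hidden=128, seq_len=10)
tokens = torch.randint(0, 1000, (4, 10))  # dummy batch: 4 sequences of 10 token indices
pooled = model(tokens)
print(pooled.shape)  # torch.Size([4, 128]) -- one attention-pooled vector per sequence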