Code Overview

import torch
from torch import nn


class Transformer(nn.Module):

    def __init__(self, src_pad_idx, trg_pad_idx, trg_sos_idx, enc_voc_size, dec_voc_size, d_model, n_head, max_len,
                 ffn_hidden, n_layers, drop_prob, device):
        super().__init__()
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.trg_sos_idx = trg_sos_idx
        self.device = device
        self.encoder = Encoder(d_model=d_model,
                               n_head=n_head,
                               max_len=max_len,
                               ffn_hidden=ffn_hidden,
                               enc_voc_size=enc_voc_size,
                               drop_prob=drop_prob,
                               n_layers=n_layers,
                               device=device)

        self.decoder = Decoder(d_model=d_model,
                               n_head=n_head,
                               max_len=max_len,
                               ffn_hidden=ffn_hidden,
                               dec_voc_size=dec_voc_size,
                               drop_prob=drop_prob,
                               n_layers=n_layers,
                               device=device)

    def forward(self, src, trg):
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        enc_src = self.encoder(src, src_mask)
        output = self.decoder(trg, enc_src, trg_mask, src_mask)
        return output

    def make_src_mask(self, src):
        # padding mask for the source: [batch_size, 1, 1, src_len]
        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
        return src_mask

    def make_trg_mask(self, trg):
        # padding mask for the target: [batch_size, 1, trg_len, 1]
        trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(3)
        trg_len = trg.shape[1]
        # lower-triangular (causal) mask so position i can only attend to positions <= i
        trg_sub_mask = torch.tril(torch.ones(trg_len, trg_len)).bool().to(self.device)
        # combined mask: [batch_size, 1, trg_len, trg_len]
        trg_mask = trg_pad_mask & trg_sub_mask
        return trg_mask
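As a quick usage sketch (the hyperparameters and tensor sizes below are illustrative, not taken from any particular configuration):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Transformer(src_pad_idx=0, trg_pad_idx=0, trg_sos_idx=1,
                    enc_voc_size=32000, dec_voc_size=32000,
                    d_model=512, n_head=8, max_len=256,
                    ffn_hidden=2048, n_layers=6, drop_prob=0.1,
                    device=device).to(device)

src = torch.randint(2, 32000, (8, 20), device=device)   # [batch_size, src_len]
trg = torch.randint(2, 32000, (8, 17), device=device)   # [batch_size, trg_len]
logits = model(src, trg)                                 # [batch_size, trg_len, dec_voc_size]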

Embedding

The embedding is the sum of a token embedding and a positional encoding: the two are added directly, then LayerNorm is applied (not present in the original Transformer, but used in BERT), followed by dropout.

The token embedding is simply:

tok_emb = nn.Embedding(vocab_size, d_model, padding_idx=pad_id)

The positional encoding is a bit more involved. The Transformer uses an absolute sinusoidal encoding that is added directly to the token embedding, where pos is the token position and i indexes the embedding dimension pairs:

\[ \text{PE}_{(pos, 2i)} = \sin\left(\frac{pos}{10000^{2i/d_{\text{model}}}}\right) \\ \text{PE}_{(pos, 2i+1)} = \cos\left(\frac{pos}{10000^{2i/d_{\text{model}}}}\right) \]

class PositionEncoding(nn.Module):
    def __init__(self, d_model, max_len, device):
        super().__init__()
        # precomputed table of shape [max_len, d_model]; fixed, not learned
        self.encoding = torch.zeros(max_len, d_model, device=device)
        self.encoding.requires_grad = False
        pos = torch.arange(0, max_len, device=device)
        pos = pos.float().unsqueeze(dim=1)                              # [max_len, 1]
        _2i = torch.arange(0, d_model, step=2, device=device).float()   # even dimension indices
        self.encoding[:, 0::2] = torch.sin(pos / 10000 ** (_2i / d_model))
        self.encoding[:, 1::2] = torch.cos(pos / 10000 ** (_2i / d_model))

    def forward(self, x):
        # x: [batch_size, seq_len]; return the encodings for the first seq_len positions
        batch_size, seq_len = x.size()
        return self.encoding[:seq_len, :]
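The TransformerEmbedding module used by the encoder and decoder below is not reproduced in these notes; a minimal sketch that combines the two parts as described above (sum, then dropout, with an optional BERT-style LayerNorm) might look like this:

class TransformerEmbedding(nn.Module):
    # sketch: token embedding + positional encoding, then dropout
    def __init__(self, vocab_size, d_model, max_len, drop_prob, device):
        super().__init__()
        self.tok_emb = nn.Embedding(vocab_size, d_model)   # could also pass padding_idx=pad_id
        self.pos_emb = PositionEncoding(d_model, max_len, device)
        self.drop_out = nn.Dropout(p=drop_prob)

    def forward(self, x):
        tok_emb = self.tok_emb(x)    # [batch_size, seq_len, d_model]
        pos_emb = self.pos_emb(x)    # [seq_len, d_model], broadcast across the batch
        out = tok_emb + pos_emb
        # out = self.norm(out)       # optional BERT-style LayerNorm, as noted above
        return self.drop_out(out)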

Encoder

Each encoder layer has two residual connections, and the padded positions must be masked. The masked positions are filled with a very large negative number so that they contribute almost nothing after the softmax.


MultiHeadAttention

class MultiHeadAttention(nn.Module):

    def __init__(self, d_model, n_head):
        super(MultiHeadAttention, self).__init__()
        self.n_head = n_head
        self.attention = ScaleDotProductAttention()
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_concat = nn.Linear(d_model, d_model) #  Wo

    def forward(self, q, k, v, mask=None):
        # 1. dot product with weight matrices
        q, k, v = self.w_q(q), self.w_k(k), self.w_v(v)

        # 2. split tensor by number of heads
        q, k, v = self.split(q), self.split(k), self.split(v)

        # 3. do scale dot product to compute similarity
        out, attention = self.attention(q, k, v, mask=mask)

        # 4. concat and pass to linear layer
        out = self.concat(out)
        out = self.w_concat(out)

        # 5. visualize attention map
        # TODO : we should implement visualization

        return out

Dot-product attention is highly parallelizable. For larger values of d_k, additive attention outperforms dot-product attention without scaling; the division by sqrt(d_k) keeps the dot products from growing so large that the softmax saturates and its gradients become tiny.

https://blog.csdn.net/qq_37430422/article/details/105042303

\[ \text{Attention}(Q, K, V) = \text{softmax}\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V \]
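A quick numerical check of this effect (illustrative only; the entries of the unscaled score matrix have standard deviation about sqrt(d_k)):

d_k = 512
q = torch.randn(1, d_k)
k = torch.randn(10, d_k)
scores = q @ k.t()                                   # unscaled scores spread over a wide range
print(scores.softmax(dim=-1).max())                  # typically close to 1: softmax saturates
print((scores / d_k ** 0.5).softmax(dim=-1).max())   # scaled scores give a much flatter distribution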

Scaled Dot-Product Attention

Q, K, and V each pass through a (d_model, d_model) linear layer and are then split across the heads. Note that the split happens along the feature dimension of each position while the sequence length stays intact; the tensor is not flattened and re-partitioned, which is why a view followed by a transpose is needed:

tensor = tensor.view(batch_size, length, self.n_head, d_tensor).transpose(1, 2)
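For reference, the split and concat methods that MultiHeadAttention calls (but which are not shown above) could be implemented roughly like this; concat is simply the inverse reshape:

    def split(self, tensor):
        # [batch_size, length, d_model] -> [batch_size, n_head, length, d_tensor]
        batch_size, length, d_model = tensor.size()
        d_tensor = d_model // self.n_head
        return tensor.view(batch_size, length, self.n_head, d_tensor).transpose(1, 2)

    def concat(self, tensor):
        # [batch_size, n_head, length, d_tensor] -> [batch_size, length, d_model]
        batch_size, head, length, d_tensor = tensor.size()
        return tensor.transpose(1, 2).contiguous().view(batch_size, length, head * d_tensor)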

The split q, k, and v are then passed to the scaled dot-product attention:

import math


class ScaleDotProductAttention(nn.Module):
    """
    compute scale dot product attention

    Query : given sentence that we focused on (decoder)
    Key : every sentence to check relationship with Query (encoder)
    Value : every sentence same with Key (encoder)
    """

    def __init__(self):
        super(ScaleDotProductAttention, self).__init__()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, k, v, mask=None, e=1e-12):
        # input is 4 dimension tensor
        # [batch_size, head, length, d_tensor]
        batch_size, head, length, d_tensor = k.size()

        # 1. dot product Query with Key^T to compute similarity
        k_t = k.transpose(2, 3)  # transpose
        score = (q @ k_t) / math.sqrt(d_tensor)  # scaled dot product

        # 2. apply masking (opt)
        if mask is not None:
            score = score.masked_fill(mask == 0, -10000)

        # 3. pass them softmax to make [0, 1] range
        score = self.softmax(score)

        # 4. multiply with Value
        v = score @ v

        return v, score
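A quick shape check (the tensors below are random and only illustrate the expected dimensions):

attn = ScaleDotProductAttention()
q = torch.randn(2, 8, 10, 64)            # [batch_size, head, length, d_tensor]
k = torch.randn(2, 8, 10, 64)
v = torch.randn(2, 8, 10, 64)
mask = torch.ones(2, 1, 1, 10)           # e.g. a source padding mask, broadcast over heads and query rows
out, score = attn(q, k, v, mask=mask)
print(out.shape, score.shape)            # torch.Size([2, 8, 10, 64]) torch.Size([2, 8, 10, 10])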

After each head computes its attention in parallel, the outputs are concatenated back together and projected by W_o. As long as d_model is divisible by the number of heads, the concatenated tensor has width d_model again, so W_o is simply a d_model × d_model (here 512 × 512) matrix.

ADD & NORM

# 1. compute self attention (save the input for the residual connection)
_x = x
x = self.attention(q=x, k=x, v=x, mask=src_mask)

# 2. add and norm
x = self.dropout1(x)
x = self.norm1(x + _x)

Position-wise Feed-Forward Networks

This projection to a higher dimension and back increases the model's representational capacity: the first linear layer maps d_model to a larger hidden size where more complex features can be learned, and the second maps back to d_model so that the residual path and the original input information are preserved. In the original Transformer the hidden size is 2048 (with d_model = 512).

class PositionwiseFeedForward(nn.Module):

    def __init__(self, d_model, hidden, drop_prob=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.linear1 = nn.Linear(d_model, hidden)
        self.linear2 = nn.Linear(hidden, d_model)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=drop_prob)

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.linear2(x)
        return x

ADD & NORM

# 3. positionwise feed forward network
_x = x
x = self.ffn(x)
      
# 4. add and norm
x = self.dropout2(x)
x = self.norm2(x + _x)
return x
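Putting the fragments above together, a full encoder layer (two sub-layers, each wrapped in dropout, a residual connection, and normalization) would look roughly like this sketch; it uses the same LayerNorm module that appears in the DecoderLayer below (nn.LayerNorm would work equally well):

class EncoderLayer(nn.Module):
    # sketch: self-attention sub-layer + feed-forward sub-layer, each with add & norm
    def __init__(self, d_model, ffn_hidden, n_head, drop_prob):
        super().__init__()
        self.attention = MultiHeadAttention(d_model=d_model, n_head=n_head)
        self.norm1 = LayerNorm(d_model=d_model)
        self.dropout1 = nn.Dropout(p=drop_prob)
        self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
        self.norm2 = LayerNorm(d_model=d_model)
        self.dropout2 = nn.Dropout(p=drop_prob)

    def forward(self, x, src_mask):
        # 1. self attention + add & norm
        _x = x
        x = self.attention(q=x, k=x, v=x, mask=src_mask)
        x = self.dropout1(x)
        x = self.norm1(x + _x)

        # 2. feed forward + add & norm
        _x = x
        x = self.ffn(x)
        x = self.dropout2(x)
        x = self.norm2(x + _x)
        return x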

Decoder

During training, the decoder's embedding input is not what was generated at the previous step but the target sequence itself (teacher forcing). Each decoder layer has three residual connections.

class Decoder(nn.Module):
    def __init__(self, dec_voc_size, max_len, d_model, ffn_hidden, n_head, n_layers, drop_prob, device):
        super().__init__()
        self.emb = TransformerEmbedding(d_model=d_model,
                                        drop_prob=drop_prob,
                                        max_len=max_len,
                                        vocab_size=dec_voc_size,
                                        device=device)

        self.layers = nn.ModuleList([DecoderLayer(d_model=d_model,
                                                  ffn_hidden=ffn_hidden,
                                                  n_head=n_head,
                                                  drop_prob=drop_prob)
                                     for _ in range(n_layers)])

        self.linear = nn.Linear(d_model, dec_voc_size)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        trg = self.emb(trg)

        for layer in self.layers:
            trg = layer(trg, enc_src, trg_mask, src_mask)

        # pass to LM head
        output = self.linear(trg)
        return output

MaskedMultiHeadAttention

The implementation is the same as in the encoder, except that a mask must be applied. In fact, even in the encoder the padded positions are masked; here the future positions are masked as well.

class DecoderLayer(nn.Module):

    def __init__(self, d_model, ffn_hidden, n_head, drop_prob):
        super(DecoderLayer, self).__init__()
        self.self_attention = MultiHeadAttention(d_model=d_model, n_head=n_head)
        self.norm1 = LayerNorm(d_model=d_model)
        self.dropout1 = nn.Dropout(p=drop_prob)

        self.enc_dec_attention = MultiHeadAttention(d_model=d_model, n_head=n_head)
        self.norm2 = LayerNorm(d_model=d_model)
        self.dropout2 = nn.Dropout(p=drop_prob)

        self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
        self.norm3 = LayerNorm(d_model=d_model)
        self.dropout3 = nn.Dropout(p=drop_prob)

The decoder's mask is somewhat more involved: the padded positions are masked first, and a lower-triangular (causal) matrix is then generated and combined with that padding mask.

Here Q, K, and V all come from the previous step's output; during training they all come from the target sequence.
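A tiny worked example of the combined target mask produced by make_trg_mask (pad index 0 assumed):

# trg = [[5, 7, 9, 0]]: three real tokens followed by one pad (pad index 0 assumed)
trg = torch.tensor([[5, 7, 9, 0]])
trg_pad_mask = (trg != 0).unsqueeze(1).unsqueeze(3)     # [1, 1, 4, 1]
trg_sub_mask = torch.tril(torch.ones(4, 4)).bool()      # causal lower triangle
trg_mask = trg_pad_mask & trg_sub_mask                  # [1, 1, 4, 4]
# row i marks which positions query i may attend to:
# [[ True, False, False, False],
#  [ True,  True, False, False],
#  [ True,  True,  True, False],
#  [False, False, False, False]]   <- the pad query attends to nothing
print(trg_mask[0, 0])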

MultiHeadAttention

The only difference from the encoder is that Q comes from the decoder side, i.e. from the output embedding after passing through the masked self-attention, while K and V come from the encoder output.

This is because the decoder needs to attend to the source sequence in order to make a sensible prediction at the current position. That information comes from the encoder output, which encodes the source sequence into a sequence of context vectors; by attending to them, the decoder obtains the semantic information of the source. The decoder must also condition on the target tokens generated so far to keep the output coherent, and that information comes from the decoder's own earlier sub-layers, since the decoder produces one new target token at each step.
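The corresponding forward pass of the DecoderLayer shown above, implied by this description (masked self-attention, then cross-attention, then the feed-forward network, each followed by add & norm), might look like this sketch:

    def forward(self, dec, enc, trg_mask, src_mask):
        # 1. masked self attention over the target (causal + padding mask)
        _x = dec
        x = self.self_attention(q=dec, k=dec, v=dec, mask=trg_mask)
        x = self.dropout1(x)
        x = self.norm1(x + _x)

        # 2. cross attention: queries from the decoder, keys/values from the encoder output
        _x = x
        x = self.enc_dec_attention(q=x, k=enc, v=enc, mask=src_mask)
        x = self.dropout2(x)
        x = self.norm2(x + _x)

        # 3. position-wise feed forward, with the third add & norm
        _x = x
        x = self.ffn(x)
        x = self.dropout3(x)
        x = self.norm3(x + _x)
        return x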

Position-wise Feed-Forward Networks

Same as in the encoder.

Linear

self.linear = nn.Linear(d_model, dec_voc_size)

softmax

Not needed during training (the cross-entropy loss works directly on the logits); it is only applied at inference time.
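As a sketch of what this means in practice (model, src, trg, trg_pad_idx, trg_sos_idx and max_len are the objects defined earlier; the greedy loop itself is illustrative):

# training: nn.CrossEntropyLoss applies log-softmax internally, so raw logits are enough
criterion = nn.CrossEntropyLoss(ignore_index=trg_pad_idx)
logits = model(src, trg[:, :-1])                       # [batch_size, trg_len - 1, dec_voc_size]
loss = criterion(logits.reshape(-1, logits.size(-1)), trg[:, 1:].reshape(-1))

# inference: greedy decoding, applying softmax / argmax to pick the next token
trg_gen = torch.full((src.size(0), 1), trg_sos_idx, dtype=torch.long, device=src.device)
for _ in range(max_len - 1):
    step_logits = model(src, trg_gen)
    next_token = step_logits[:, -1].softmax(dim=-1).argmax(dim=-1, keepdim=True)
    trg_gen = torch.cat([trg_gen, next_token], dim=1)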