| """ |
| MIT License |
| |
| Copyright (c) 2021 Wilson Yan |
| |
| Permission is hereby granted, free of charge, to any person obtaining a copy |
| of this software and associated documentation files (the "Software"), to deal |
| in the Software without restriction, including without limitation the rights |
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| copies of the Software, and to permit persons to whom the Software is |
| furnished to do so, subject to the following conditions: |
| |
| The above copyright notice and this permission notice shall be included in all |
| copies or substantial portions of the Software. |
| |
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| SOFTWARE. |
| |
| |
| This file is copied from https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/attention.py |
| We adapted it to Hugging Face AutoModel for easier model loading. |
| """ |
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

from ._utils import shift_dim, view_range, tensor_slice


class AttentionStack(nn.Module):
    def __init__(
        self, shape, embd_dim, n_head, n_layer, dropout,
        attn_type, attn_dropout, class_cond_dim, frame_cond_shape,
    ):
        super().__init__()
        self.shape = shape
        self.embd_dim = embd_dim
        self.use_frame_cond = frame_cond_shape is not None

        self.right_shift = RightShift(embd_dim)
        self.pos_embd = AddBroadcastPosEmbed(
            shape=shape, embd_dim=embd_dim
        )

        self.attn_nets = nn.ModuleList(
            [
                AttentionBlock(
                    shape=shape,
                    embd_dim=embd_dim,
                    n_head=n_head,
                    n_layer=n_layer,
                    dropout=dropout,
                    attn_type=attn_type,
                    attn_dropout=attn_dropout,
                    class_cond_dim=class_cond_dim,
                    frame_cond_shape=frame_cond_shape
                )
                for i in range(n_layer)
            ]
        )

    def forward(self, x, cond, decode_step, decode_idx):
        """
        Args
        ------
        x: (b, d1, d2, ..., dn, embd_dim)
        cond: a dictionary of conditioning tensors

        (below is used only when sampling for fast decoding)
        decode_step: the enumerated raster-scan order of the current index being sampled
        decode_idx: a tuple representing the current index being sampled
        """
        x = self.right_shift(x, decode_step)
        x = self.pos_embd(x, decode_step, decode_idx)
        for net in self.attn_nets:
            x = net(x, cond, decode_step, decode_idx)

        return x

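# Minimal usage sketch (hypothetical hyperparameters; in practice the stack is
# built and called by the surrounding VideoGPT model):
#
#   stack = AttentionStack(shape=(4, 16, 16), embd_dim=576, n_head=4, n_layer=8,
#                          dropout=0.1, attn_type='full', attn_dropout=0.1,
#                          class_cond_dim=None, frame_cond_shape=None)
#   x = torch.randn(2, 4, 16, 16, 576)                 # (b, t, h, w, embd_dim)
#   out = stack(x, cond={}, decode_step=None, decode_idx=None)
#   assert out.shape == x.shape
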
class AttentionBlock(nn.Module):
    def __init__(self, shape, embd_dim, n_head, n_layer, dropout,
                 attn_type, attn_dropout, class_cond_dim, frame_cond_shape):
        super().__init__()
        self.use_frame_cond = frame_cond_shape is not None

        self.pre_attn_norm = LayerNorm(embd_dim, class_cond_dim)
        self.post_attn_dp = nn.Dropout(dropout)
        self.attn = MultiHeadAttention(shape, embd_dim, embd_dim, n_head,
                                       n_layer, causal=True, attn_type=attn_type,
                                       attn_kwargs=dict(attn_dropout=attn_dropout))

        if frame_cond_shape is not None:
            enc_len = np.prod(frame_cond_shape[:-1])
            self.pre_enc_norm = LayerNorm(embd_dim, class_cond_dim)
            self.post_enc_dp = nn.Dropout(dropout)
            self.enc_attn = MultiHeadAttention(shape, embd_dim, frame_cond_shape[-1],
                                               n_head, n_layer, attn_type='full',
                                               attn_kwargs=dict(attn_dropout=0.), causal=False)

        self.pre_fc_norm = LayerNorm(embd_dim, class_cond_dim)
        self.post_fc_dp = nn.Dropout(dropout)
        self.fc_block = nn.Sequential(
            nn.Linear(in_features=embd_dim, out_features=embd_dim * 4),
            GeLU2(),
            nn.Linear(in_features=embd_dim * 4, out_features=embd_dim),
        )

    def forward(self, x, cond, decode_step, decode_idx):
        # causal self-attention sub-block (gradient checkpointed during training)
        h = self.pre_attn_norm(x, cond)
        if self.training:
            h = checkpoint(self.attn, h, h, h, decode_step, decode_idx)
        else:
            h = self.attn(h, h, h, decode_step, decode_idx)
        h = self.post_attn_dp(h)
        x = x + h

        # cross-attention over the conditioning frames
        if self.use_frame_cond:
            h = self.pre_enc_norm(x, cond)
            if self.training:
                h = checkpoint(self.enc_attn, h, cond['frame_cond'], cond['frame_cond'],
                               decode_step, decode_idx)
            else:
                h = self.enc_attn(h, cond['frame_cond'], cond['frame_cond'],
                                  decode_step, decode_idx)
            h = self.post_enc_dp(h)
            x = x + h

        # position-wise feed-forward sub-block
        h = self.pre_fc_norm(x, cond)
        if self.training:
            h = checkpoint(self.fc_block, h)
        else:
            h = self.fc_block(h)
        h = self.post_fc_dp(h)
        x = x + h

        return x

class MultiHeadAttention(nn.Module):
    def __init__(self, shape, dim_q, dim_kv, n_head, n_layer,
                 causal, attn_type, attn_kwargs):
        super().__init__()
        self.causal = causal
        self.shape = shape

        self.d_k = dim_q // n_head
        self.d_v = dim_kv // n_head
        self.n_head = n_head

        self.w_qs = nn.Linear(dim_q, n_head * self.d_k, bias=False)
        self.w_qs.weight.data.normal_(std=1.0 / np.sqrt(dim_q))

        self.w_ks = nn.Linear(dim_kv, n_head * self.d_k, bias=False)
        self.w_ks.weight.data.normal_(std=1.0 / np.sqrt(dim_kv))

        self.w_vs = nn.Linear(dim_kv, n_head * self.d_v, bias=False)
        self.w_vs.weight.data.normal_(std=1.0 / np.sqrt(dim_kv))

        self.fc = nn.Linear(n_head * self.d_v, dim_q, bias=True)
        self.fc.weight.data.normal_(std=1.0 / np.sqrt(dim_q * n_layer))

        if attn_type == 'full':
            self.attn = FullAttention(shape, causal, **attn_kwargs)
        elif attn_type == 'axial':
            assert not causal, 'causal axial attention is not supported'
            self.attn = AxialAttention(len(shape), **attn_kwargs)
        elif attn_type == 'sparse':
            self.attn = SparseAttention(shape, n_head, causal, **attn_kwargs)

        self.cache = None

    def forward(self, q, k, v, decode_step=None, decode_idx=None):
        """ Compute multi-head attention
        Args
            q, k, v: a [b, d1, ..., dn, c] tensor or
                     a [b, 1, ..., 1, c] tensor if decode_step is not None

        Returns
            The output after performing attention
        """
        # compute q, k, v projections and split heads
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        q = view_range(self.w_qs(q), -1, None, (n_head, d_k))
        k = view_range(self.w_ks(k), -1, None, (n_head, d_k))
        v = view_range(self.w_vs(v), -1, None, (n_head, d_v))

        # (b, *d_shape, n_head, d) -> (b, n_head, *d_shape, d)
        q = shift_dim(q, -2, 1)
        k = shift_dim(k, -2, 1)
        v = shift_dim(v, -2, 1)

        # fast decoding: allocate the k/v cache at step 0, then write one position per step
        if decode_step is not None:
            if decode_step == 0:
                if self.causal:
                    k_shape = (q.shape[0], n_head, *self.shape, self.d_k)
                    v_shape = (q.shape[0], n_head, *self.shape, self.d_v)
                    self.cache = dict(k=torch.zeros(k_shape, dtype=k.dtype, device=q.device),
                                      v=torch.zeros(v_shape, dtype=v.dtype, device=q.device))
                else:
                    # cache only once in the non-causal case
                    self.cache = dict(k=k.clone(), v=v.clone())
            if self.causal:
                idx = (slice(None, None), slice(None, None), *[slice(i, i + 1) for i in decode_idx])
                self.cache['k'][idx] = k
                self.cache['v'][idx] = v
            k, v = self.cache['k'], self.cache['v']

        a = self.attn(q, k, v, decode_step, decode_idx)

        # (b, n_head, *d_shape, d) -> (b, *d_shape, n_head * d)
        a = shift_dim(a, 1, -2).flatten(start_dim=-2)
        a = self.fc(a)

        return a

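# Sketch of how fast decoding exercises the cache above (hypothetical loop; the
# actual sampling loop lives in the surrounding VideoGPT model code). At step 0
# the cache is allocated; each step writes one position's k/v and attends over it:
#
#   for decode_step, decode_idx in enumerate(np.ndindex(*shape)):
#       token_embd = ...                          # (b, 1, ..., 1, embd_dim)
#       out = attn_stack(token_embd, cond, decode_step, decode_idx)
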
class FullAttention(nn.Module):
    def __init__(self, shape, causal, attn_dropout):
        super().__init__()
        self.causal = causal
        self.attn_dropout = attn_dropout

        seq_len = np.prod(shape)
        if self.causal:
            self.register_buffer('mask', torch.tril(torch.ones(seq_len, seq_len)))

    def forward(self, q, k, v, decode_step, decode_idx):
        mask = self.mask if self.causal else None
        if decode_step is not None and mask is not None:
            mask = mask[[decode_step]]

        old_shape = q.shape[2:-1]
        q = q.flatten(start_dim=2, end_dim=-2)
        k = k.flatten(start_dim=2, end_dim=-2)
        v = v.flatten(start_dim=2, end_dim=-2)

        out = scaled_dot_product_attention(q, k, v, mask=mask,
                                           attn_dropout=self.attn_dropout,
                                           training=self.training)

        return view_range(out, 2, 3, old_shape)

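# During fast decoding, `mask[[decode_step]]` selects the single causal-mask row
# for the current position while keeping a length-1 dimension, e.g. (hypothetical):
#
#   mask = torch.tril(torch.ones(4, 4))
#   mask[[2]]        # tensor([[1., 1., 1., 0.]]), shape (1, 4)
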
class AxialAttention(nn.Module):
    def __init__(self, n_dim, axial_dim):
        super().__init__()
        if axial_dim < 0:
            axial_dim = 2 + n_dim + 1 + axial_dim
        else:
            axial_dim += 2  # account for batch and head dims
        self.axial_dim = axial_dim

    def forward(self, q, k, v, decode_step, decode_idx):
        # fold every dim except the axial one into the batch dim, then attend along the axis
        q = shift_dim(q, self.axial_dim, -2).flatten(end_dim=-3)
        k = shift_dim(k, self.axial_dim, -2).flatten(end_dim=-3)
        v = shift_dim(v, self.axial_dim, -2)
        old_shape = list(v.shape)
        v = v.flatten(end_dim=-3)

        out = scaled_dot_product_attention(q, k, v, training=self.training)
        out = out.view(*old_shape)
        out = shift_dim(out, -2, self.axial_dim)
        return out

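# Example (hypothetical sizes): with video inputs of shape (b, n_head, t, h, w, d)
# and AxialAttention(n_dim=3, axial_dim=0), self.axial_dim becomes 2 (the t axis),
# so attention runs along time independently for every (h, w) location.
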
class SparseAttention(nn.Module):
    ops = dict()
    attn_mask = dict()
    block_layout = dict()

    def __init__(self, shape, n_head, causal, num_local_blocks=4, block=32,
                 attn_dropout=0.):  # attn_dropout is unused
        super().__init__()
        self.causal = causal
        self.shape = shape

        self.sparsity_config = StridedSparsityConfig(shape=shape, n_head=n_head,
                                                     causal=causal, block=block,
                                                     num_local_blocks=num_local_blocks)

        if self.shape not in SparseAttention.block_layout:
            SparseAttention.block_layout[self.shape] = self.sparsity_config.make_layout()
        if causal and self.shape not in SparseAttention.attn_mask:
            SparseAttention.attn_mask[self.shape] = self.sparsity_config.make_sparse_attn_mask()

    def get_ops(self):
        try:
            from deepspeed.ops.sparse_attention import MatMul, Softmax
        except ImportError:
            raise Exception('Error importing deepspeed. Please install using `DS_BUILD_SPARSE_ATTN=1 pip install deepspeed`')
        if self.shape not in SparseAttention.ops:
            sparsity_layout = self.sparsity_config.make_layout()
            sparse_dot_sdd_nt = MatMul(sparsity_layout,
                                       self.sparsity_config.block,
                                       'sdd',
                                       trans_a=False,
                                       trans_b=True)

            sparse_dot_dsd_nn = MatMul(sparsity_layout,
                                       self.sparsity_config.block,
                                       'dsd',
                                       trans_a=False,
                                       trans_b=False)

            sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)

            SparseAttention.ops[self.shape] = (sparse_dot_sdd_nt,
                                               sparse_dot_dsd_nn,
                                               sparse_softmax)
        return SparseAttention.ops[self.shape]

    def forward(self, q, k, v, decode_step, decode_idx):
        if self.training and self.shape not in SparseAttention.ops:
            self.get_ops()

        SparseAttention.block_layout[self.shape] = SparseAttention.block_layout[self.shape].to(q)
        if self.causal:
            SparseAttention.attn_mask[self.shape] = SparseAttention.attn_mask[self.shape].to(q).type_as(q)
        attn_mask = SparseAttention.attn_mask[self.shape] if self.causal else None

        old_shape = q.shape[2:-1]
        q = q.flatten(start_dim=2, end_dim=-2)
        k = k.flatten(start_dim=2, end_dim=-2)
        v = v.flatten(start_dim=2, end_dim=-2)

        if decode_step is not None:
            mask = self.sparsity_config.get_non_block_layout_row(SparseAttention.block_layout[self.shape], decode_step)
            out = scaled_dot_product_attention(q, k, v, mask=mask, training=self.training)
        else:
            if q.shape != k.shape or k.shape != v.shape:
                raise Exception('SparseAttention only supports self-attention')
            sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops()
            scaling = float(q.shape[-1]) ** -0.5

            attn_output_weights = sparse_dot_sdd_nt(q, k)
            if attn_mask is not None:
                attn_output_weights = attn_output_weights.masked_fill(attn_mask == 0,
                                                                      float('-inf'))
            attn_output_weights = sparse_softmax(
                attn_output_weights,
                scale=scaling
            )

            out = sparse_dot_dsd_nn(attn_output_weights, v)

        return view_range(out, 2, 3, old_shape)

class StridedSparsityConfig(object):
    """
    Strided sparse configuration as specified in https://arxiv.org/abs/1904.10509,
    generalized to arbitrary dimensions
    """
    def __init__(self, shape, n_head, causal, block, num_local_blocks):
        self.n_head = n_head
        self.shape = shape
        self.causal = causal
        self.block = block
        self.num_local_blocks = num_local_blocks

        assert self.num_local_blocks >= 1, 'Must have at least 1 local block'
        assert self.seq_len % self.block == 0, 'seq len must be divisible by block size'

        self._block_shape = self._compute_block_shape()
        self._block_shape_cum = self._block_shape_cum_sizes()

    @property
    def seq_len(self):
        return np.prod(self.shape)

    @property
    def num_blocks(self):
        return self.seq_len // self.block

    def set_local_layout(self, layout):
        num_blocks = self.num_blocks
        for row in range(0, num_blocks):
            end = min(row + self.num_local_blocks, num_blocks)
            for col in range(
                    max(0, row - self.num_local_blocks),
                    (row + 1 if self.causal else end)):
                layout[:, row, col] = 1
        return layout

    def set_global_layout(self, layout):
        num_blocks = self.num_blocks
        n_dim = len(self._block_shape)
        for row in range(num_blocks):
            assert self._to_flattened_idx(self._to_unflattened_idx(row)) == row
            cur_idx = self._to_unflattened_idx(row)
            # no strided attention over the last dim
            for d in range(n_dim - 1):
                end = self._block_shape[d]
                for i in range(0, (cur_idx[d] + 1 if self.causal else end)):
                    new_idx = list(cur_idx)
                    new_idx[d] = i
                    new_idx = tuple(new_idx)

                    col = self._to_flattened_idx(new_idx)
                    layout[:, row, col] = 1

        return layout

    def make_layout(self):
        layout = torch.zeros((self.n_head, self.num_blocks, self.num_blocks), dtype=torch.int64)
        layout = self.set_local_layout(layout)
        layout = self.set_global_layout(layout)
        return layout

    def make_sparse_attn_mask(self):
        block_layout = self.make_layout()
        assert block_layout.shape[1] == block_layout.shape[2] == self.num_blocks

        num_dense_blocks = block_layout.sum().item()
        attn_mask = torch.ones(num_dense_blocks, self.block, self.block)
        counter = 0
        for h in range(self.n_head):
            for i in range(self.num_blocks):
                for j in range(self.num_blocks):
                    elem = block_layout[h, i, j].item()
                    if elem == 1:
                        assert i >= j  # causal
                        if i == j:  # need to mask within the block on diagonals
                            attn_mask[counter] = torch.tril(attn_mask[counter])
                        counter += 1
        assert counter == num_dense_blocks

        return attn_mask.unsqueeze(0)

    def get_non_block_layout_row(self, block_layout, row):
        block_row = row // self.block
        block_row = block_layout[:, [block_row]]  # n_head x 1 x num_blocks
        block_row = block_row.repeat_interleave(self.block, dim=-1)
        block_row[:, :, row + 1:] = 0.
        return block_row

    ############# Helper functions ##########################

    def _compute_block_shape(self):
        n_dim = len(self.shape)
        cum_prod = 1
        for i in range(n_dim - 1, -1, -1):
            cum_prod *= self.shape[i]
            if cum_prod > self.block:
                break
        assert cum_prod % self.block == 0
        new_shape = (*self.shape[:i], cum_prod // self.block)

        assert np.prod(new_shape) == np.prod(self.shape) // self.block

        return new_shape

    def _block_shape_cum_sizes(self):
        bs = np.flip(np.array(self._block_shape))
        return tuple(np.flip(np.cumprod(bs)[:-1])) + (1,)

    def _to_flattened_idx(self, idx):
        assert len(idx) == len(self._block_shape), f"{len(idx)} != {len(self._block_shape)}"
        flat_idx = 0
        for i in range(len(self._block_shape)):
            flat_idx += idx[i] * self._block_shape_cum[i]
        return flat_idx

    def _to_unflattened_idx(self, flat_idx):
        assert flat_idx < np.prod(self._block_shape)
        idx = []
        for i in range(len(self._block_shape)):
            idx.append(flat_idx // self._block_shape_cum[i])
            flat_idx %= self._block_shape_cum[i]
        return tuple(idx)

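# Tiny worked example (hypothetical sizes): shape=(4, 16, 16) gives seq_len=1024;
# with block=32 there are 1024 // 32 = 32 blocks, so make_layout() returns an
# (n_head, 32, 32) 0/1 tensor marking which key blocks each query block attends to:
#
#   cfg = StridedSparsityConfig(shape=(4, 16, 16), n_head=4, causal=True,
#                               block=32, num_local_blocks=4)
#   layout = cfg.make_layout()   # torch.Size([4, 32, 32])
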
class AddBroadcastPosEmbed(nn.Module):
    def __init__(self, shape, embd_dim, dim=-1):
        super().__init__()
        assert dim in [-1, 1]  # only first or last dim supported
        self.shape = shape
        self.n_dim = n_dim = len(shape)
        self.embd_dim = embd_dim
        self.dim = dim

        assert embd_dim % n_dim == 0, f"{embd_dim} % {n_dim} != 0"
        self.emb = nn.ParameterDict({
            f'd_{i}': nn.Parameter(torch.randn(shape[i], embd_dim // n_dim) * 0.01
                                   if dim == -1 else
                                   torch.randn(embd_dim // n_dim, shape[i]) * 0.01)
            for i in range(n_dim)
        })

    def forward(self, x, decode_step=None, decode_idx=None):
        embs = []
        for i in range(self.n_dim):
            e = self.emb[f'd_{i}']
            if self.dim == -1:
                # (1, 1, ..., shape[i], ..., 1, embd_dim // n_dim)
                e = e.view(1, *((1,) * i), self.shape[i], *((1,) * (self.n_dim - i - 1)), -1)
                e = e.expand(1, *self.shape, -1)
            else:
                e = e.view(1, -1, *((1,) * i), self.shape[i], *((1,) * (self.n_dim - i - 1)))
                e = e.expand(1, -1, *self.shape)
            embs.append(e)

        embs = torch.cat(embs, dim=self.dim)
        if decode_step is not None:
            embs = tensor_slice(embs, [0, *decode_idx, 0],
                                [x.shape[0], *(1,) * self.n_dim, x.shape[-1]])

        return x + embs

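# Sketch of the broadcast scheme (hypothetical sizes): with shape=(4, 16, 16) and
# embd_dim=576, each axis gets its own 192-dim embedding table; the three tables
# are broadcast over the other axes and concatenated channel-wise:
#
#   pos = AddBroadcastPosEmbed(shape=(4, 16, 16), embd_dim=576)
#   x = torch.zeros(1, 4, 16, 16, 576)
#   out = pos(x)    # out[b, t, h, w] = cat(emb_t[t], emb_h[h], emb_w[w])
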
def scaled_dot_product_attention(q, k, v, mask=None, attn_dropout=0., training=True):
    # performs scaled dot-product attention over the second-to-last dimension

    # (b, n_head, d1, ..., dn, d)
    attn = torch.matmul(q, k.transpose(-1, -2))
    attn = attn / np.sqrt(q.shape[-1])
    if mask is not None:
        attn = attn.masked_fill(mask == 0, float('-inf'))
    attn_float = F.softmax(attn, dim=-1)
    attn = attn_float.type_as(attn)  # softmax in float32, cast back to input dtype
    attn = F.dropout(attn, p=attn_dropout, training=training)

    a = torch.matmul(attn, v)  # (b, n_head, d1, ..., dn, d)

    return a

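# Minimal usage sketch (hypothetical sizes): attention over a sequence of 5 tokens
# with 2 heads of width 8:
#
#   q = k = v = torch.randn(1, 2, 5, 8)
#   out = scaled_dot_product_attention(q, k, v, training=False)
#   out.shape    # torch.Size([1, 2, 5, 8])
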
class RightShift(nn.Module):
    def __init__(self, embd_dim):
        super().__init__()
        self.embd_dim = embd_dim
        self.sos = nn.Parameter(torch.FloatTensor(embd_dim).normal_(std=0.02), requires_grad=True)

    def forward(self, x, decode_step):
        if decode_step is not None and decode_step > 0:
            return x

        x_shape = list(x.shape)
        x = x.flatten(start_dim=1, end_dim=-2)  # (b, seq_len, embd_dim)
        sos = torch.ones(x_shape[0], 1, self.embd_dim, dtype=torch.float32).to(self.sos) * self.sos
        sos = sos.type_as(x)
        # prepend the learned start-of-sequence token and drop the last position
        x = torch.cat([sos, x[:, :-1, :]], dim=1)
        x = x.view(*x_shape)

        return x

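# Example of the shift (hypothetical 3-token sequence): inputs [e0, e1, e2] become
# [sos, e0, e1], so position i only ever sees embeddings of positions < i, as
# required for autoregressive prediction:
#
#   rs = RightShift(embd_dim=8)
#   x = torch.randn(1, 3, 8)          # [e0, e1, e2]
#   y = rs(x, decode_step=None)       # [sos, e0, e1]
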
class GeLU2(nn.Module):
    # sigmoid-approximated GELU: x * sigmoid(1.702 * x)
    def forward(self, x):
        return (1.702 * x).sigmoid() * x

class LayerNorm(nn.Module):
    def __init__(self, embd_dim, class_cond_dim):
        super().__init__()
        self.conditional = class_cond_dim is not None

        if self.conditional:
            # conditional layer norm: scale and shift are predicted from the class conditioning
            self.w = nn.Linear(class_cond_dim, embd_dim, bias=False)
            nn.init.constant_(self.w.weight.data, 1. / np.sqrt(class_cond_dim))
            self.wb = nn.Linear(class_cond_dim, embd_dim, bias=False)
        else:
            self.g = nn.Parameter(torch.ones(embd_dim, dtype=torch.float32), requires_grad=True)
            self.b = nn.Parameter(torch.zeros(embd_dim, dtype=torch.float32), requires_grad=True)

    def forward(self, x, cond):
        if self.conditional:  # cond['class_cond']: (b, class_cond_dim)
            g = 1 + self.w(cond['class_cond']).view(x.shape[0], *(1,) * (len(x.shape) - 2), x.shape[-1])
            b = self.wb(cond['class_cond']).view(x.shape[0], *(1,) * (len(x.shape) - 2), x.shape[-1])
        else:
            g = self.g
            b = self.b

        x_float = x.float()  # compute normalization statistics in float32 for stability

        mu = x_float.mean(dim=-1, keepdim=True)
        s = (x_float - mu).square().mean(dim=-1, keepdim=True)
        # note: eps is added to rsqrt(s) rather than to s before the sqrt, matching
        # the original VideoGPT implementation (and its pretrained checkpoints)
        x_float = (x_float - mu) * (1e-5 + s.rsqrt())
        x_float = x_float * g + b

        x = x_float.type_as(x)
        return x
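

# Usage sketch (hypothetical sizes): with class conditioning, the norm's scale and
# shift are linear functions of the class embedding rather than learned constants:
#
#   ln = LayerNorm(embd_dim=576, class_cond_dim=128)
#   x = torch.randn(2, 4, 16, 16, 576)
#   cond = {'class_cond': torch.randn(2, 128)}
#   y = ln(x, cond)    # same shape as x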