import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_


class Mlp(nn.Module):
    """Two-layer feed-forward network: FC -> activation -> dropout -> FC -> dropout."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(
        B, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x

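
# Illustrative round-trip check (an addition, not part of the original source):
# window_partition and window_reverse are exact inverses whenever H and W are
# divisible by window_size. The shapes below are arbitrary example values.
def _demo_window_roundtrip():
    x = torch.randn(2, 14, 14, 96)  # (B, H, W, C)
    windows = window_partition(x, window_size=7)  # (2 * 2 * 2, 7, 7, 96)
    assert windows.shape == (8, 7, 7, 96)
    x_rec = window_reverse(windows, window_size=7, H=14, W=14)
    assert torch.equal(x_rec, x)
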
|
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # (Wh, Ww)
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        # define a parameter table of relative position biases,
        # shape (2*Wh-1 * 2*Ww-1, nH)
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )

        # get the pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # (2, Wh, Ww)
        coords_flatten = torch.flatten(coords, 1)  # (2, Wh*Ww)
        relative_coords = (
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        )  # (2, Wh*Ww, Wh*Ww)
        relative_coords = relative_coords.permute(
            1, 2, 0
        ).contiguous()  # (Wh*Ww, Wh*Ww, 2)
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # (Wh*Ww, Wh*Ww)
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)
|
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = (
            qkv[0],
            qkv[1],
            qkv[2],
        )  # each: (B_, nH, N, C // nH)

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1],
            self.window_size[0] * self.window_size[1],
            -1,
        )  # (Wh*Ww, Wh*Ww, nH)
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # (nH, Wh*Ww, Wh*Ww)
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
|
    def flops(self, N):
        """Calculate FLOPs for one window with a token length of N."""
        flops = 0
        # qkv projection: (N, dim) -> (N, 3*dim)
        flops += N * self.dim * 3 * self.dim
        # attention scores: q @ k^T
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # weighted aggregation: attn @ v
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # output projection: (N, dim) -> (N, dim)
        flops += N * self.dim * self.dim
        return flops
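

# Illustrative usage sketch (an addition, not part of the original source):
# plain W-MSA over a batch of 7x7 windows; shapes are arbitrary example values.
def _demo_window_attention():
    w_msa = WindowAttention(dim=96, window_size=to_2tuple(7), num_heads=3)
    x = torch.randn(8, 49, 96)  # (num_windows*B, N, C) with N = 7 * 7
    out = w_msa(x)  # mask=None -> non-shifted window attention
    assert out.shape == (8, 49, 96)

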
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if the window size is larger than the input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, (
            "shift_size must be in [0, window_size)"
        )

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

        if self.shift_size > 0:
            # calculate the attention mask for SW-MSA: windows that wrap around
            # after the cyclic shift must not attend across the original border
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # (1, H, W, 1)
            h_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            w_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(
                img_mask, self.window_size
            )  # (nW, window_size, window_size, 1)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(
                attn_mask != 0, float(-100.0)
            ).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)
|
    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
            )
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # (nW*B, window_size, window_size, C)
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # (nW*B, window_size*window_size, C)

        # W-MSA/SW-MSA
        attn_windows = self.attn(
            x_windows, mask=self.attn_mask
        )  # (nW*B, window_size*window_size, C)

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # (B, H, W, C)

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
            )
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN with residual connections
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x
|
    def extra_repr(self) -> str:
        return (
            f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
            f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
        )
|
    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
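

# Illustrative usage sketch (an addition, not part of the original source): one
# SW-MSA block on a 56x56 feature map, e.g. the first stage of a Swin-T-sized
# model. Shapes are arbitrary example values.
def _demo_swin_block():
    block = SwinTransformerBlock(
        dim=96, input_resolution=(56, 56), num_heads=3, window_size=7, shift_size=3
    )
    x = torch.randn(2, 56 * 56, 96)  # (B, H*W, C)
    out = block(x)
    assert out.shape == x.shape

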
class PatchMerging(nn.Module):
    r"""Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
|
    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."

        x = x.view(B, H, W, C)

        # gather the four 2x2 neighbors and concatenate along channels
        x0 = x[:, 0::2, 0::2, :]  # (B, H/2, W/2, C)
        x1 = x[:, 1::2, 0::2, :]  # (B, H/2, W/2, C)
        x2 = x[:, 0::2, 1::2, :]  # (B, H/2, W/2, C)
        x3 = x[:, 1::2, 1::2, :]  # (B, H/2, W/2, C)
        x = torch.cat([x0, x1, x2, x3], -1)  # (B, H/2, W/2, 4*C)
        x = x.view(B, -1, 4 * C)  # (B, H/2*W/2, 4*C)

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim  # norm
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim  # reduction
        return flops
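

# Illustrative usage sketch (an addition, not part of the original source):
# merging halves the spatial resolution and doubles the channels.
def _demo_patch_merging():
    merge = PatchMerging(input_resolution=(56, 56), dim=96)
    x = torch.randn(2, 56 * 56, 96)  # (B, H*W, C)
    out = merge(x)
    assert out.shape == (2, 28 * 28, 192)  # (B, H/2*W/2, 2*C)

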
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks; even-indexed blocks use W-MSA, odd-indexed blocks use SW-MSA
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )

        # optional patch merging (downsample) layer at the end of the stage
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, norm_layer=norm_layer
            )
        else:
            self.downsample = None
|
    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # trade compute for memory by re-running the block in backward
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
|
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
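

# Illustrative usage sketch (an addition, not part of the original source): a
# two-block stage alternating W-MSA/SW-MSA, followed by PatchMerging, roughly
# the first stage of a Swin-T-sized model. Shapes are arbitrary example values.
def _demo_basic_layer():
    layer = BasicLayer(
        dim=96,
        input_resolution=(56, 56),
        depth=2,
        num_heads=3,
        window_size=7,
        downsample=PatchMerging,
    )
    x = torch.randn(2, 56 * 56, 96)  # (B, H*W, C)
    out = layer(x)
    assert out.shape == (2, 28 * 28, 192)  # downsampled by PatchMerging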