| """ | |
| Source: https://github.com/isalirezag/TReS/ | |
| """ | |
| import torch | |
| import torchvision.models as models | |
| import torchvision | |
| import torch.nn.functional as F | |
| from torch import nn, Tensor | |
| import numpy as np | |
| from scipy import stats | |
| from tqdm import tqdm | |
| import os | |
| import math | |
| import csv | |
| import copy | |
| import json | |
| from typing import Type, Any, Callable, Union, List, Optional | |
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class L2pooling(nn.Module):
    """L2 pooling: square the input, blur it with a normalized Hann window,
    then take the square root. (`pad_off` is kept for API compatibility but
    is unused.)"""

    def __init__(self, filter_size=5, stride=1, channels=None, pad_off=0):
        super(L2pooling, self).__init__()
        self.padding = (filter_size - 2) // 2
        self.stride = stride
        self.channels = channels
        a = np.hanning(filter_size)[1:-1]
        g = torch.Tensor(a[:, None] * a[None, :])
        g = g / torch.sum(g)
        self.register_buffer('filter', g[None, None, :, :].repeat((self.channels, 1, 1, 1)))

    def forward(self, input):
        input = input ** 2
        out = F.conv2d(input, self.filter, stride=self.stride,
                       padding=self.padding, groups=input.shape[1])
        return (out + 1e-12).sqrt()
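
# A minimal shape check for L2pooling (a sketch; the 64-channel input below is
# an arbitrary example, not a value taken from the training pipeline):
#
#   pool = L2pooling(channels=64)
#   feats = torch.rand(2, 64, 56, 56)
#   pooled = pool(feats)
#   assert pooled.shape == feats.shape  # stride=1 with matching padding keeps H and W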
class BasicBlock(nn.Module):
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2),
    # while the original implementation places the stride at the first 1x1 convolution (self.conv1),
    # according to "Deep Residual Learning for Image Recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # self.maxpool = L2pooling(channels=64)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor):
        # See note [TorchScript super()]
        # Unlike torchvision's ResNet, this returns the feature maps of all
        # four stages alongside the final logits.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        l1 = x
        x = self.layer2(x)
        l2 = x
        x = self.layer3(x)
        l3 = x
        x = self.layer4(x)
        l4 = x
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x, l1, l2, l3, l4

    def forward(self, x: Tensor):
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(model_urls[arch],
                                                        progress=progress)
        model.load_state_dict(state_dict)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)


def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)


def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
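
# Unlike torchvision's resnet50, this variant's forward returns the stage
# feature maps alongside the logits (a sketch with a random input; shapes
# assume a 224x224 image):
#
#   model = resnet50(pretrained=False).eval()
#   with torch.no_grad():
#       logits, l1, l2, l3, l4 = model(torch.rand(1, 3, 224, 224))
#   # l1: (1, 256, 56, 56), l2: (1, 512, 28, 28),
#   # l3: (1, 1024, 14, 14), l4: (1, 2048, 7, 7)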
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except that the bottleneck number of channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except that the bottleneck number of channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is unchanged, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the "Attention Is All You Need" paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats  # 128 in DETR
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_val):
        x = tensor_val
        # The original DETR forward consumed a NestedTensor whose mask marked
        # padded pixels (1 for padding); inputs here are dense, so the mask is
        # all False.
        mask = torch.zeros((x.shape[0], x.shape[2], x.shape[3]),
                           dtype=torch.bool, device=x.device)
        not_mask = ~mask
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
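
# Shape sketch: for a dense input of shape (B, C, H, W) the embedding has shape
# (B, 2 * num_pos_feats, H, W), so num_pos_feats is typically half the model dim:
#
#   pos_enc = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
#   pos = pos_enc(torch.rand(2, 256, 7, 7))   # -> torch.Size([2, 256, 7, 7])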
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)
        j = torch.arange(h, device=x.device)
        x_emb = self.col_embed(i)
        y_emb = self.row_embed(j)
        pos = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos
def build_position_encoding(args):
    N_steps = args.hidden_dim // 2
    if args.position_embedding in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    elif args.position_embedding in ('v3', 'learned'):
        position_embedding = PositionEmbeddingLearned(N_steps)
    else:
        raise ValueError(f"not supported {args.position_embedding}")
    return position_embedding
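
# A usage sketch; `args` is a stand-in namespace with the two fields read
# above, not the project's actual argument parser:
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(hidden_dim=256, position_embedding='sine')
#   pos_enc = build_position_encoding(args)   # PositionEmbeddingSine(128, normalize=True)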
class Transformer(nn.Module):
    def __init__(self, d_model=256, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()
        # Encoder-only: `num_decoder_layers` and `return_intermediate_dec` are
        # accepted for interface compatibility with DETR but are unused here.
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        self._reset_parameters()
        self.nhead = nhead

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        memory = self.encoder(src, pos=pos_embed)
        return memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask, pos=pos)
        if self.norm is not None:
            output = self.norm(output)
        return output
class TransformerEncoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(src, pos)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
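
# Sketch: `normalize_before` selects pre-norm (LayerNorm before attention/FFN)
# versus post-norm (LayerNorm after each residual add); either way the layer
# maps (seq_len, batch, d_model) to the same shape:
#
#   layer = TransformerEncoderLayer(d_model=256, nhead=8, normalize_before=True)
#   out = layer(torch.rand(49, 2, 256))   # -> torch.Size([49, 2, 256])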
def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def build_transformer(args):
    return Transformer(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
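
# A usage sketch with a stand-in config; the field names mirror those read
# above (the decoder-related fields are accepted but unused, since this
# Transformer is encoder-only):
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(hidden_dim=256, dropout=0.1, nheads=8,
#                          dim_feedforward=2048, enc_layers=6, dec_layers=6,
#                          pre_norm=False)
#   transformer = build_transformer(args)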
def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
if __name__ == '__main__':
    # Smoke test: run a random feature map through the encoder-only
    # transformer with sine position encoding.
    d_modelt = 1024
    nheadt = 8
    num_encoder_layerst = 2
    dim_feedforwardt = 1024
    dropout = 0.1
    normalize_beforet = False
    transformer = Transformer(d_model=d_modelt, nhead=nheadt,
                              num_encoder_layers=num_encoder_layerst,
                              dim_feedforward=dim_feedforwardt,
                              normalize_before=normalize_beforet)
    hidden_dim = d_modelt
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    src = torch.rand(2, hidden_dim, 19, 29)
    pos_add = pos_enc(src)
    out = transformer(src, pos_embed=pos_add)
    print(torch.sum(out), out.shape)
class Net(nn.Module):
    def __init__(self, cfg, device):
        super(Net, self).__init__()
        self.device = device
        self.cfg = cfg
        self.L2pooling_l1 = L2pooling(channels=256)
        self.L2pooling_l2 = L2pooling(channels=512)
        self.L2pooling_l3 = L2pooling(channels=1024)
        self.L2pooling_l4 = L2pooling(channels=2048)
        if cfg.network == 'resnet50':
            dim_modelt = 3840
            modelpretrain = models.resnet50(pretrained=True)
            self.model = resnet50()
        elif cfg.network == 'resnet34':
            dim_modelt = 960
            modelpretrain = models.resnet34(pretrained=True)
            self.model = resnet34()
            self.L2pooling_l1 = L2pooling(channels=64)
            self.L2pooling_l2 = L2pooling(channels=128)
            self.L2pooling_l3 = L2pooling(channels=256)
            self.L2pooling_l4 = L2pooling(channels=512)
        elif cfg.network == 'resnet18':
            dim_modelt = 960
            modelpretrain = models.resnet18(pretrained=True)
            self.model = resnet18()
            self.L2pooling_l1 = L2pooling(channels=64)
            self.L2pooling_l2 = L2pooling(channels=128)
            self.L2pooling_l3 = L2pooling(channels=256)
            self.L2pooling_l4 = L2pooling(channels=512)
        else:
            raise ValueError(f"unsupported backbone: {cfg.network}")
        # Copy the ImageNet weights into the matching local ResNet variant,
        # whose forward also returns the intermediate feature maps.
        self.model.load_state_dict(modelpretrain.state_dict(), strict=True)
        self.dim_modelt = dim_modelt
        nheadt = cfg.nheadt
        num_encoder_layerst = cfg.num_encoder_layerst
        dim_feedforwardt = cfg.dim_feedforwardt
        ddropout = 0.5
        normalize = True
        self.transformer = Transformer(d_model=dim_modelt, nhead=nheadt,
                                       num_encoder_layers=num_encoder_layerst,
                                       dim_feedforward=dim_feedforwardt,
                                       normalize_before=normalize,
                                       dropout=ddropout)
        self.position_embedding = PositionEmbeddingSine(dim_modelt // 2, normalize=True)
        self.fc2 = nn.Linear(dim_modelt, self.model.fc.in_features)
        self.fc = nn.Linear(self.model.fc.in_features * 2, 1)
        self.ReLU = nn.ReLU()
        self.avg7 = nn.AvgPool2d((7, 7))
        self.avg8 = nn.AvgPool2d((8, 8))
        self.avg4 = nn.AvgPool2d((4, 4))
        self.avg2 = nn.AvgPool2d((2, 2))
        self.drop2d = nn.Dropout(p=0.1)
        self.consistency = nn.L1Loss()
    def forward(self, x):
        # Sine position encoding for the 7x7 token grid, broadcast over the batch.
        self.pos_enc_1 = self.position_embedding(torch.ones(1, self.dim_modelt, 7, 7).to(self.device))
        self.pos_enc = self.pos_enc_1.repeat(x.shape[0], 1, 1, 1).contiguous()
        out, layer1, layer2, layer3, layer4 = self.model(x)
        # L2-pool each stage's normalized features, average-pool them to a
        # common 7x7 grid, and concatenate along channels for the transformer.
        layer1_t = self.avg8(self.drop2d(self.L2pooling_l1(F.normalize(layer1, dim=1, p=2))))
        layer2_t = self.avg4(self.drop2d(self.L2pooling_l2(F.normalize(layer2, dim=1, p=2))))
        layer3_t = self.avg2(self.drop2d(self.L2pooling_l3(F.normalize(layer3, dim=1, p=2))))
        layer4_t = self.drop2d(self.L2pooling_l4(F.normalize(layer4, dim=1, p=2)))
        layers = torch.cat((layer1_t, layer2_t, layer3_t, layer4_t), dim=1)
        out_t_c = self.transformer(layers, self.pos_enc)
        out_t_o = torch.flatten(self.avg7(out_t_c), start_dim=1)
        out_t_o = self.fc2(out_t_o)
        layer4_o = self.avg7(layer4)
        layer4_o = torch.flatten(layer4_o, start_dim=1)
        predictionQA = self.fc(torch.flatten(torch.cat((out_t_o, layer4_o), dim=1), start_dim=1))
        # =====================================================================
        # Self-consistency branch (disabled): run the horizontally flipped
        # input through the same pipeline and penalize divergence.
        # fout,flayer1,flayer2,flayer3,flayer4 = self.model(torch.flip(x, [3]))
        # flayer1_t = self.avg8( self.L2pooling_l1(F.normalize(flayer1,dim=1, p=2)))
        # flayer2_t = self.avg4( self.L2pooling_l2(F.normalize(flayer2,dim=1, p=2)))
        # flayer3_t = self.avg2( self.L2pooling_l3(F.normalize(flayer3,dim=1, p=2)))
        # flayer4_t = self.L2pooling_l4(F.normalize(flayer4,dim=1, p=2))
        # flayers = torch.cat((flayer1_t,flayer2_t,flayer3_t,flayer4_t),dim=1)
        # fout_t_c = self.transformer(flayers,self.pos_enc)
        # fout_t_o = torch.flatten(self.avg7(fout_t_c),start_dim=1)
        # fout_t_o = (self.fc2(fout_t_o))
        # flayer4_o = self.avg7(flayer4)
        # flayer4_o = torch.flatten(flayer4_o,start_dim=1)
        # fpredictionQA = (self.fc(torch.flatten(torch.cat((fout_t_o,flayer4_o),dim=1),start_dim=1)))
        # consistloss1 = self.consistency(out_t_c,fout_t_c.detach())
        # consistloss2 = self.consistency(layer4,flayer4.detach())
        # consistloss = 1*(consistloss1+consistloss2)
        # =====================================================================
        return predictionQA, torch.flatten(torch.cat((out_t_o, layer4_o), dim=1), start_dim=1)
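
# An end-to-end sketch of Net. `cfg` is a hypothetical stand-in exposing the
# fields Net reads (the real project builds it from its own config), and the
# hyperparameter values below are illustrative; the resnet50 branch downloads
# ImageNet weights on first use.
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(network='resnet50', nheadt=16,
#                         num_encoder_layerst=2, dim_feedforwardt=64)
#   device = torch.device('cpu')
#   net = Net(cfg, device).to(device).eval()
#   with torch.no_grad():
#       score, features = net(torch.rand(2, 3, 224, 224))
#   # score: (2, 1) predicted quality; features: the concatenated embedding
#   # fed to the final regressor.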