from collections import OrderedDict
from typing import Dict, List, Optional, Union

import torch
from torch import nn
from torch.nn import functional as F

from .utils import freeze_batch_norm_2d, feature_take_indices


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1; an avgpool is performed after the second
        # convolution when stride > 1 (anti-aliased downsampling)
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.act1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.act2 = nn.ReLU(inplace=True)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.act3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # the shortcut is likewise prepended with an avgpool, and the
            # subsequent 1x1 convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.act1(self.bn1(self.conv1(x)))
        out = self.act2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.act3(out)
        return out

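# A minimal shape sketch of the block above (hypothetical standalone usage):
#   blk = Bottleneck(inplanes=64, planes=64, stride=2)
#   blk(torch.randn(1, 64, 56, 56)).shape  # -> torch.Size([1, 256, 28, 28])
# i.e. channels expand by Bottleneck.expansion and stride-2 downsampling is
# performed by AvgPool2d rather than a strided convolution.

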
class AttentionPool2d(nn.Module):
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: Optional[int] = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # prepend mean token -> (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0.,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        # the pooled output is the attention result at the prepended mean token
        return x[0]

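# Shape sketch (hypothetical standalone usage, CLIP RN50-like sizes assumed):
#   pool = AttentionPool2d(spacial_dim=7, embed_dim=2048, num_heads=32, output_dim=1024)
#   pool(torch.randn(2, 2048, 7, 7)).shape  # -> torch.Size([2, 1024])

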
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1.
    - The final pooling layer is a QKV attention instead of an average pool.
    """

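    # For reference (assumed from CLIP's published RN50 configuration, not pinned
    # by this file): layers=[3, 4, 6, 3], width=64, heads=32, output_dim=1024,
    # image_size=224, giving embed_dim = 64 * 32 = 2048 at the attention pool.
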
    def __init__(
            self,
            layers: List[int],
            output_dim: int,
            heads: int,
            image_size: int = 224,
            width: int = 64,
    ):
        super().__init__()
        self.output_dim = output_dim
        self.image_size = image_size

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.act1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.act2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.act3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)

        self.init_parameters()

    def _make_layer(self, planes, blocks, stride=1):
        # the first block in each layer performs any required downsampling
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def init_parameters(self):
        if self.attnpool is not None:
            std = self.attnpool.c_proj.in_features ** -0.5
            nn.init.normal_(self.attnpool.q_proj.weight, std=std)
            nn.init.normal_(self.attnpool.k_proj.weight, std=std)
            nn.init.normal_(self.attnpool.v_proj.weight, std=std)
            nn.init.normal_(self.attnpool.c_proj.weight, std=std)

        # zero-init the final BatchNorm gain in each residual branch so every
        # block starts out as a (near) identity mapping
        for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
            for name, param in resnet_block.named_parameters():
                if name.endswith("bn3.weight"):
                    nn.init.zeros_(param)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        assert unlocked_groups == 0, 'partial locking not currently supported for this model'
        for param in self.parameters():
            param.requires_grad = False
        if freeze_bn_stats:
            freeze_batch_norm_2d(self)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # no-op: gradient checkpointing is not implemented for this model
        pass

    def stem(self, x):
        # 3 convs + avgpool: downsamples the input by a total factor of 4
        x = self.act1(self.bn1(self.conv1(x)))
        x = self.act2(self.bn2(self.conv2(x)))
        x = self.act3(self.bn3(self.conv3(x)))
        x = self.avgpool(x)
        return x

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            stop_early: bool = False,
            normalize_intermediates: bool = False,
            intermediates_only: bool = False,
            output_fmt: str = 'NCHW',
            output_extra_tokens: bool = False,
    ) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
        """ Forward features that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if int, all if None, select matching indices if sequence
            stop_early: Stop iterating over blocks when last desired intermediate hit
            normalize_intermediates: Apply final norm layer to all intermediates
            intermediates_only: Only return intermediate features
            output_fmt: Shape of intermediate feature outputs
            output_extra_tokens: Return both extra class, eot tokens
        Returns:
            Dict with 'image_intermediates' (list of feature tensors) and, unless
            intermediates_only is set, 'image_features' (pooled output).
        """
        assert output_fmt in ('NCHW',), 'Output format must be NCHW.'
        # NOTE: normalize_intermediates and output_extra_tokens do not apply to
        # this model (no final norm layer, no extra tokens)

        take_indices, max_index = feature_take_indices(5, indices)

        output = {}
        intermediates = []
        blocks = [self.stem, self.layer1, self.layer2, self.layer3, self.layer4]
        if not torch.jit.is_scripting() and stop_early:  # can't slice blocks in torchscript
            blocks = blocks[:max_index + 1]
        for i, blk in enumerate(blocks):
            x = blk(x)
            if i in take_indices:
                intermediates.append(x)

        output['image_intermediates'] = intermediates

        if intermediates_only:
            return output

        x = self.attnpool(x)
        output['image_features'] = x

        return output

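    # Sketch of the indices semantics (values follow the docstring above, where
    # an int takes the last n of the 5 stages):
    #   out = model.forward_intermediates(x, indices=2, intermediates_only=True)
    #   out['image_intermediates']  # -> [layer3 output, layer4 output]
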
    def forward(self, x):
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x
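

# A minimal smoke-test sketch (assumed RN50-like sizes; run in package context,
# e.g. via `python -m`, since this module uses a relative import):
if __name__ == "__main__":
    model = ModifiedResNet(layers=[3, 4, 6, 3], output_dim=1024, heads=32)
    img = torch.randn(2, 3, 224, 224)
    print(model(img).shape)  # torch.Size([2, 1024])
    out = model.forward_intermediates(img, indices=2, intermediates_only=True)
    for feat in out['image_intermediates']:
        print(feat.shape)  # torch.Size([2, 1024, 14, 14]), torch.Size([2, 2048, 7, 7])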