implemented dinounet
src/__init__.py (new file, 0 lines)
src/model/__init__.py (new file, 0 lines)
src/model/dinounet.py (new file, 201 lines)
@@ -0,0 +1,201 @@
from dataclasses import dataclass, field

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import DINOv3ViTModel
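
# Overview of the module below: a DINOv3 ViT backbone yields patch tokens at
# several depths; FAPM projects each tapped layer into a shared low-rank space
# and FiLM-modulates a layer-specific projection with it; LearnableUpsample
# brings each scale to its U-Net skip resolution; a plain U-Net decoder fuses
# the skips into the segmentation output.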


@dataclass
class DinoUNETConfig:
    # The checkpoint must match DINOv3ViTModel; the previous default
    # ("facebook/dinov2-small") is a DINOv2 checkpoint and does not match this class.
    model_name: str = "facebook/dinov3-vits16-pretrain-lvd1689m"
    num_classes: int = 2
    # Mutable defaults must go through default_factory; a bare list raises
    # ValueError when the dataclass is defined.
    features_per_stage: list[int] = field(default_factory=lambda: [32, 64, 128, 256])
    n_conv_per_stage_decoder: list[int] = field(default_factory=lambda: [2, 2, 2])
    deep_supervision: bool = False
    rank: int = 256


class SqueezeExcitation(nn.Module):
    """Channel gate: squeeze via global average pooling, excite via a bottleneck MLP."""

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(channels, max(1, channels // reduction), 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(max(1, channels // reduction), channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.fc(self.pool(x))


class DepthwiseSeparableConv(nn.Module):
    """Depthwise conv followed by a 1x1 pointwise conv, with BN + ReLU."""

    def __init__(self, in_ch: int, out_ch: int, kernel_size: int = 3, stride: int = 1, padding: int = 1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size, stride, padding, groups=in_ch)
        self.pointwise = nn.Conv2d(in_ch, out_ch, 1)
        self.bn = nn.BatchNorm2d(out_ch)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.bn(self.pointwise(self.depthwise(x))))


class LearnableUpsample(nn.Module):
    """Upsample by repeated learnable 2x transposed convs, then bilinear-resize any remainder."""

    def __init__(self, channels: int):
        super().__init__()
        # One shared 2x transposed conv, applied as many times as the target size allows.
        self.up2 = nn.ConvTranspose2d(channels, channels, 2, 2)

    def forward(self, x: torch.Tensor, target_size: tuple[int, int]) -> torch.Tensor:
        h, w = x.shape[2:]
        out = x
        while h * 2 <= target_size[0] and w * 2 <= target_size[1]:
            out = self.up2(out)
            h, w = out.shape[2:]
        if (h, w) != target_size:
            out = F.interpolate(out, target_size, mode='bilinear', align_corners=False)
        return out


class FAPM(nn.Module):
    """Per-scale feature adaptation: a shared low-rank basis produces FiLM
    parameters that modulate a scale-specific low-rank projection, followed by
    a refinement block with an SE gate and a residual shortcut."""

    def __init__(self, in_ch: int, rank: int, out_ch_list: list[int]):
        super().__init__()
        self.shared_basis = nn.Conv2d(in_ch, rank, 1)
        self.specific_bases = nn.ModuleList([nn.Conv2d(in_ch, rank, 1) for _ in out_ch_list])
        self.film_generators = nn.ModuleList([nn.Conv2d(rank, rank * 2, 1) for _ in out_ch_list])
        self.refinement_blocks = nn.ModuleList()
        self.shortcuts = nn.ModuleList()

        for oc in out_ch_list:
            self.refinement_blocks.append(nn.Sequential(
                nn.Conv2d(rank, oc, 1),
                nn.BatchNorm2d(oc),
                nn.ReLU(inplace=True),
                DepthwiseSeparableConv(oc, oc),
                nn.Conv2d(oc, oc, 1),
                SqueezeExcitation(oc),
            ))
            self.shortcuts.append(nn.Conv2d(rank, oc, 1) if rank != oc else nn.Identity())

    def forward(self, x_list: list[torch.Tensor]) -> list[torch.Tensor]:
        out = []
        for i, x in enumerate(x_list):
            z_shared = self.shared_basis(x)
            z_specific = self.specific_bases[i](x)
            # FiLM: per-channel affine modulation predicted from the shared projection.
            gamma, beta = torch.chunk(self.film_generators[i](z_shared), 2, dim=1)
            z_modulated = gamma * z_specific + beta
            refined = self.refinement_blocks[i](z_modulated)
            shortcut = self.shortcuts[i](z_modulated)
            out.append(refined + shortcut)
        return out


class DINOv3Encoder(nn.Module):
    """Wraps a DINOv3 ViT as a multi-scale U-Net encoder."""

    def __init__(self, config: DinoUNETConfig):
        super().__init__()
        self.output_channels = config.features_per_stage
        # Skip s sits at 1/2^s of the input resolution, so the first stage has stride 1.
        self.strides = [(1, 1)] + [(2, 2)] * (len(config.features_per_stage) - 1)

        self.backbone = DINOv3ViTModel.from_pretrained(config.model_name)
        self.fapm = FAPM(self.backbone.config.hidden_size, config.rank, config.features_per_stage)
        self.ups = nn.ModuleList([LearnableUpsample(oc) for oc in config.features_per_stage])

    def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
        B, C, H, W = x.shape

        # The backbone expects 3 channels: repeat grayscale, zero-pad 2-channel
        # input, and keep only the first 3 channels otherwise.
        if C == 1:
            x = x.repeat(1, 3, 1, 1)
        elif C < 3:
            x = F.pad(x, (0, 0, 0, 0, 0, 3 - C))
        elif C > 3:
            x = x[:, :3]

        outputs = self.backbone(x, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        h, w = H // self.backbone.config.patch_size, W // self.backbone.config.patch_size
        features = []
        # hidden_states[0] is the embedding output, so a 12-layer ViT gives 13 entries.
        indices = [3, 6, 9, 12] if len(hidden_states) > 12 else [2, 4, 6, -1]
        # DINOv3 prepends a CLS token plus register tokens; all of them must be
        # dropped before folding the patch tokens back into a 2D grid (slicing
        # off only the CLS token would break the reshape below).
        num_prefix = 1 + getattr(self.backbone.config, "num_register_tokens", 0)

        for idx in indices:
            feat = hidden_states[idx][:, num_prefix:].transpose(1, 2).reshape(B, -1, h, w)
            features.append(feat)

        features = self.fapm(features)
        skips = []
        for i, feat in enumerate(features):
            target_size = (H // (2 ** i), W // (2 ** i))
            skips.append(self.ups[i](feat, target_size))

        return skips


class ConvBlock(nn.Module):
    """Conv -> BN -> ReLU with 'same' padding."""

    def __init__(self, in_ch: int, out_ch: int, kernel_size: int = 3, stride: int = 1):
        super().__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride, kernel_size // 2)
        self.bn = nn.BatchNorm2d(out_ch)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.bn(self.conv(x)))


class StackedConvBlocks(nn.Module):
    """n_blocks ConvBlocks in sequence; only the first changes the channel count."""

    def __init__(self, n_blocks: int, in_ch: int, out_ch: int, kernel_size: int = 3):
        super().__init__()
        self.blocks = nn.Sequential(
            ConvBlock(in_ch, out_ch, kernel_size),
            *[ConvBlock(out_ch, out_ch, kernel_size) for _ in range(n_blocks - 1)],
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.blocks(x)


class UNetDecoder(nn.Module):
    """nnU-Net-style decoder: transposed-conv upsampling, skip concatenation, conv stages."""

    def __init__(self, encoder: DINOv3Encoder, config: DinoUNETConfig):
        super().__init__()
        self.deep_supervision = config.deep_supervision
        self.encoder = encoder

        self.stages = nn.ModuleList()
        self.transpconvs = nn.ModuleList()
        self.seg_layers = nn.ModuleList()

        for s in range(1, len(encoder.output_channels)):
            in_below = encoder.output_channels[-s]
            in_skip = encoder.output_channels[-(s + 1)]
            stride = encoder.strides[-s]

            self.transpconvs.append(nn.ConvTranspose2d(in_below, in_skip, stride, stride))
            self.stages.append(StackedConvBlocks(
                config.n_conv_per_stage_decoder[s - 1],
                2 * in_skip,  # upsampled features concatenated with the skip
                in_skip,
            ))
            self.seg_layers.append(nn.Conv2d(in_skip, config.num_classes, 1))

    def forward(self, skips: list[torch.Tensor]) -> torch.Tensor | list[torch.Tensor]:
        # Returns a list (highest resolution first) only under deep supervision.
        lres_input = skips[-1]
        seg_outputs = []

        for s in range(len(self.stages)):
            x = self.transpconvs[s](lres_input)
            x = torch.cat((x, skips[-(s + 2)]), 1)
            x = self.stages[s](x)
            if self.deep_supervision or s == len(self.stages) - 1:
                seg_outputs.append(self.seg_layers[s](x))
            lres_input = x

        seg_outputs = seg_outputs[::-1]
        return seg_outputs if self.deep_supervision else seg_outputs[0]


class DinoUNet(nn.Module):
    """DINOv3 encoder + U-Net decoder for semantic segmentation."""

    def __init__(self, config: DinoUNETConfig):
        super().__init__()
        self.encoder = DINOv3Encoder(config)
        self.decoder = UNetDecoder(self.encoder, config)

    def forward(self, x: torch.Tensor) -> torch.Tensor | list[torch.Tensor]:
        skips = self.encoder(x)
        return self.decoder(skips)
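

# Minimal usage sketch: build the model with the default config and run one
# forward pass. Assumes network access to download the checkpoint and an input
# whose spatial size is a multiple of the backbone's patch size (e.g. 224 for
# a 16-pixel patch).
if __name__ == "__main__":
    cfg = DinoUNETConfig()
    model = DinoUNet(cfg).eval()
    with torch.no_grad():
        # A single-channel input is repeated to 3 channels inside the encoder.
        logits = model(torch.randn(1, 1, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 2, 224, 224])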