Create modular_rtdetrv2.py
modular_rtdetrv2.py
ADDED
@@ -0,0 +1,339 @@
import math
import os
import warnings
from dataclasses import dataclass
from functools import lru_cache, partial
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable

from transformers.activations import ACT2CLS, ACT2FN
from transformers.image_transforms import center_to_corners_format, corners_to_center_format
from transformers.modeling_outputs import BaseModelOutput
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_ninja_available,
    is_scipy_available,
    is_torch_cuda_available,
    logging,
    replace_return_docstrings,
    requires_backends,
)

from transformers.models.rt_detr.configuration_rt_detr_resnet import RTDetrResNetConfig
from transformers.models.rt_detr.modeling_rt_detr import (
    MultiScaleDeformableAttentionFunction,  # NOTE: assumed importable from modeling_rt_detr; needed by the custom-kernel path in RTDetrV2MultiscaleDeformableAttention.forward
    RTDetrConfig,
    RTDetrDecoderOutput,
    RTDetrModelOutput,
    RTDetrObjectDetectionOutput,
    RTDetrFrozenBatchNorm2d,
    RTDetrConvEncoder,
    RTDetrConvNormLayer,
    RTDetrEncoderLayer,
    RTDetrRepVggBlock,
    RTDetrCSPRepLayer,
    RTDetrMultiscaleDeformableAttention,
    RTDetrMultiheadAttention,
    RTDetrDecoderLayer,
    RTDetrPreTrainedModel,
    RTDetrEncoder,
    RTDetrHybridEncoder,
    RTDetrDecoder,
    RTDetrModel,
    RTDetrMLPPredictionHead,
    RTDetrForObjectDetection,
)
from transformers.loss.loss_rt_detr import RTDetrLoss, RTDetrHungarianMatcher
from transformers.utils.backbone_utils import load_backbone

# TODO: define the config in configuration_rt_detr_v2.py and import it from there:
# from .configuration_rt_detr_v2 import RTDetrV2Config

class RTDetrV2Config(RTDetrConfig):
    model_type = "rt_detr_v2"  # update the model type

    def __init__(
        self,
        decoder_n_levels=3,
        decoder_offset_scale=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.decoder_n_levels = decoder_n_levels
        self.decoder_offset_scale = decoder_offset_scale

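# Example (illustrative): the v2 config only adds the two decoder sampling knobs on top of
# RTDetrConfig; everything else (backbone, hidden sizes, number of queries, ...) is inherited:
#
#     config = RTDetrV2Config(decoder_n_levels=3, decoder_offset_scale=0.5)
#     config.decoder_n_levels      # number of feature levels the decoder attention samples from
#     config.decoder_offset_scale  # scales the predicted sampling offsets relative to the box size
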
class RTDetrV2ResNetConfig(RTDetrResNetConfig):
    model_type = "rt_detr_v2_resnet"


logger = logging.get_logger(__name__)


class RTDetrV2DecoderOutput(RTDetrDecoderOutput):
    pass


class RTDetrV2ModelOutput(RTDetrModelOutput):
    pass


class RTDetrV2ObjectDetectionOutput(RTDetrObjectDetectionOutput):
    pass


class RTDetrV2FrozenBatchNorm2d(RTDetrFrozenBatchNorm2d):
    pass


class RTDetrV2ConvEncoder(RTDetrConvEncoder):
    pass


class RTDetrV2ConvNormLayer(RTDetrConvNormLayer):
    pass


class RTDetrV2EncoderLayer(RTDetrEncoderLayer):
    pass


class RTDetrV2RepVggBlock(RTDetrRepVggBlock):
    pass


class RTDetrV2CSPRepLayer(RTDetrCSPRepLayer):
    pass

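# Note on the modular pattern used above (illustrative): these subclasses intentionally contain
# only `pass`. When this modular file is compiled into a standalone modeling file by the
# transformers modular-converter workflow, each RTDetrV2* stub is expected to expand into a full
# copy of the corresponding RTDetr* implementation with the class prefix renamed, roughly:
#
#     class RTDetrV2ConvEncoder(nn.Module):
#         ...  # body copied from RTDetrConvEncoder
#
# Only the classes below that actually override something (config, deformable attention,
# pretrained base) diverge from RT-DETR v1.
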
# New implementation of the multiscale deformable attention (v2).
def multi_scale_deformable_attention_v2(
    value: Tensor,
    value_spatial_shapes: Tensor,
    sampling_locations: Tensor,
    attention_weights: Tensor,
    num_points_list: List[int],
    method="default",
) -> Tensor:
    batch_size, _, num_heads, hidden_dim = value.shape
    _, num_queries, num_heads, num_levels, num_points = sampling_locations.shape
    value_list = (
        value.permute(0, 2, 3, 1)
        .flatten(0, 1)
        .split([height.item() * width.item() for height, width in value_spatial_shapes], dim=-1)
    )
    # sampling_locations: (batch_size, num_queries, num_heads, sum(num_points_list), 2)
    if method == "default":
        # grid_sample expects normalized coordinates in [-1, 1]
        sampling_grids = 2 * sampling_locations - 1
    elif method == "discrete":
        sampling_grids = sampling_locations
    else:
        raise ValueError(f"method should be 'default' or 'discrete', but got {method}")
    sampling_grids = sampling_grids.permute(0, 2, 1, 3, 4).flatten(0, 1)
    sampling_grids = sampling_grids.split(num_points_list, dim=-2)
    sampling_value_list = []
    for level_id, (height, width) in enumerate(value_spatial_shapes):
        # batch_size, height*width, num_heads, hidden_dim
        # -> batch_size, height*width, num_heads*hidden_dim
        # -> batch_size, num_heads*hidden_dim, height*width
        # -> batch_size*num_heads, hidden_dim, height, width
        value_l_ = value_list[level_id].reshape(batch_size * num_heads, hidden_dim, height, width)
        # batch_size, num_queries, num_heads, num_points, 2
        # -> batch_size, num_heads, num_queries, num_points, 2
        # -> batch_size*num_heads, num_queries, num_points, 2
        sampling_grid_l_ = sampling_grids[level_id]
        # batch_size*num_heads, hidden_dim, num_queries, num_points
        if method == "default":
            sampling_value_l_ = nn.functional.grid_sample(
                value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
            )
        elif method == "discrete":
            # round the normalized locations to integer pixel indices instead of interpolating
            sampling_coord = (sampling_grid_l_ * torch.tensor([[width, height]], device=value.device) + 0.5).to(
                torch.int64
            )

            # separate clamping for x and y coordinates
            sampling_coord_x = sampling_coord[..., 0].clamp(0, width - 1)
            sampling_coord_y = sampling_coord[..., 1].clamp(0, height - 1)

            # combine the clamped coordinates
            sampling_coord = torch.stack([sampling_coord_x, sampling_coord_y], dim=-1)
            sampling_coord = sampling_coord.reshape(batch_size * num_heads, num_queries * num_points_list[level_id], 2)
            sampling_idx = (
                torch.arange(sampling_coord.shape[0], device=value.device)
                .unsqueeze(-1)
                .repeat(1, sampling_coord.shape[1])
            )
            sampling_value_l_ = value_l_[sampling_idx, :, sampling_coord[..., 1], sampling_coord[..., 0]]
            sampling_value_l_ = sampling_value_l_.permute(0, 2, 1).reshape(
                batch_size * num_heads, hidden_dim, num_queries, num_points_list[level_id]
            )
        sampling_value_list.append(sampling_value_l_)
    # (batch_size, num_queries, num_heads, num_levels, num_points)
    # -> (batch_size, num_heads, num_queries, num_levels, num_points)
    # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points)
    attention_weights = attention_weights.permute(0, 2, 1, 3).reshape(
        batch_size * num_heads, 1, num_queries, sum(num_points_list)
    )
    output = (
        (torch.concat(sampling_value_list, dim=-1) * attention_weights)
        .sum(-1)
        .view(batch_size, num_heads * hidden_dim, num_queries)
    )
    return output.transpose(1, 2).contiguous()

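# Worked shape example for multi_scale_deformable_attention_v2 (illustrative numbers): with
# batch_size=2, num_heads=8, hidden_dim=32 (256 // 8), three levels of spatial shapes
# (32, 32), (16, 16), (8, 8) and num_points_list=[4, 4, 4]:
#     value:              (2, 32*32 + 16*16 + 8*8, 8, 32) = (2, 1344, 8, 32)
#     sampling_locations: (2, num_queries, 8, 12, 2)   # 12 = sum(num_points_list)
#     attention_weights:  (2, num_queries, 8, 12)
#     returned output:    (2, num_queries, 256)
# Each query gathers 4 points per level, weights them with a softmax over all 12 points, and the
# per-head results are concatenated back to the model dimension.
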
class RTDetrV2MultiscaleDeformableAttention(RTDetrMultiscaleDeformableAttention):
    def __init__(self, config: RTDetrV2Config):
        super().__init__(config, config.decoder_attention_heads, config.decoder_n_points)
        self.n_levels = config.decoder_n_levels
        self.offset_scale = config.decoder_offset_scale
        # same number of sampling points at every feature level
        n_points_list = [self.n_points for _ in range(self.n_levels)]
        self.n_points_list = n_points_list
        # per-point scale 1/n, used by the box-relative offset formulation in forward
        n_points_scale = [1 / n for n in n_points_list for _ in range(n)]
        self.register_buffer("n_points_scale", torch.tensor(n_points_scale, dtype=torch.float32))

        self._reset_parameters()

    def _reset_parameters(self):
        nn.init.constant_(self.sampling_offsets.weight.data, 0.0)
        default_dtype = torch.get_default_dtype()
        thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.n_heads, 1, 1, 2)
            .repeat(1, self.n_levels, self.n_points, 1)
        )
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        nn.init.constant_(self.attention_weights.weight.data, 0.0)
        nn.init.constant_(self.attention_weights.bias.data, 0.0)
        nn.init.xavier_uniform_(self.value_proj.weight.data)
        nn.init.constant_(self.value_proj.bias.data, 0.0)
        nn.init.xavier_uniform_(self.output_proj.weight.data)
        nn.init.constant_(self.output_proj.bias.data, 0.0)

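    # Intuition for the sampling-offset initialization above (illustrative): with n_heads=8 the
    # bias starts the heads at evenly spaced angles (0°, 45°, 90°, ...), each direction normalized
    # to the unit square, and point i is pushed out by a factor (i + 1), so each head initially
    # samples a short ray of points marching away from the reference location. Since the weight
    # matrix is zero, the predicted offsets equal this bias at initialization.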
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings: Optional[torch.Tensor] = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        # add position embeddings to the hidden states before projecting to queries and keys
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)

        batch_size, num_queries, _ = hidden_states.shape
        batch_size, sequence_length, _ = encoder_hidden_states.shape
        if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
            raise ValueError(
                "Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
            )

        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            # we invert the attention_mask
            value = value.masked_fill(~attention_mask[..., None], float(0))
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points, 2
        )
        attention_weights = self.attention_weights(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
        )
        attention_weights = F.softmax(attention_weights, -1).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
        )
        # batch_size, num_queries, n_heads, n_levels*n_points, 2
        num_coordinates = reference_points.shape[-1]
        if num_coordinates == 2:
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = (
                reference_points[:, :, None, :, None, :]
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            )
        elif num_coordinates == 4:
            # RT-DETRv2: offsets are scaled by the reference box size and decoder_offset_scale
            n_points_scale = self.n_points_scale.to(dtype=hidden_states.dtype).unsqueeze(-1)
            offset = sampling_offsets * n_points_scale * reference_points[:, :, None, :, 2:] * self.offset_scale
            sampling_locations = reference_points[:, :, None, :, :2] + offset
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")

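        # Worked example for the box-relative branch above (illustrative): with n_points=4 and
        # offset_scale=0.5, a reference box (cx, cy, w, h) = (0.5, 0.5, 0.2, 0.4) and a raw
        # offset of (1.0, 1.0) for one of its points gives
        #     offset            = (1.0, 1.0) * (1/4) * (0.2, 0.4) * 0.5 = (0.025, 0.05)
        #     sampling location = (0.525, 0.55)
        # i.e. sampling points can only wander a fraction of the box size away from its center.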
        if self.disable_custom_kernels:
            # PyTorch implementation
            output = multi_scale_deformable_attention_v2(
                value, spatial_shapes, sampling_locations, attention_weights, self.n_points_list
            )
        else:
            try:
                # custom kernel
                output = MultiScaleDeformableAttentionFunction.apply(
                    value,
                    spatial_shapes,
                    level_start_index,
                    sampling_locations,
                    attention_weights,
                    self.im2col_step,
                )
            except Exception:
                # PyTorch implementation
                output = multi_scale_deformable_attention_v2(
                    value, spatial_shapes, sampling_locations, attention_weights, self.n_points_list
                )
        output = self.output_proj(output)

        return output, attention_weights

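# Minimal usage sketch for the v2 attention block (illustrative, assumes a built config and
# flattened multi-level encoder features):
#
#     attn = RTDetrV2MultiscaleDeformableAttention(config)
#     out, weights = attn(
#         hidden_states=queries,                # (batch, num_queries, d_model)
#         encoder_hidden_states=encoder_feats,  # (batch, sum(H_l * W_l), d_model)
#         reference_points=reference_boxes,     # (batch, num_queries, n_levels, 4) in (cx, cy, w, h)
#         spatial_shapes=spatial_shapes,        # (n_levels, 2) holding (H_l, W_l) per level
#         level_start_index=level_start_index,
#     )
#
# In the full model this call happens inside RTDetrV2DecoderLayer, so it is normally not
# invoked directly.
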
class RTDetrV2MultiheadAttention(RTDetrMultiheadAttention):
    pass


class RTDetrV2DecoderLayer(RTDetrDecoderLayer):
    pass


class RTDetrV2PreTrainedModel(RTDetrPreTrainedModel):
    config_class = RTDetrV2Config
    base_model_prefix = "rt_detr_v2"
    main_input_name = "pixel_values"
    _no_split_modules = [r"RTDetrV2ConvEncoder", r"RTDetrV2EncoderLayer", r"RTDetrV2DecoderLayer"]


class RTDetrV2Encoder(RTDetrEncoder):
    pass


class RTDetrV2HybridEncoder(RTDetrHybridEncoder):
    pass


class RTDetrV2Decoder(RTDetrDecoder):
    pass


class RTDetrV2Model(RTDetrModel):
    pass


class RTDetrV2Loss(RTDetrLoss):
    pass


class RTDetrV2MLPPredictionHead(RTDetrMLPPredictionHead):
    pass


class RTDetrV2HungarianMatcher(RTDetrHungarianMatcher):
    pass


class RTDetrV2ForObjectDetection(RTDetrForObjectDetection):
    pass
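# End-to-end sketch (illustrative): once the modular converter has generated the modeling file,
# the classes above are used like any other transformers detection model, e.g.
#
#     config = RTDetrV2Config()
#     model = RTDetrV2ForObjectDetection(config)
#     pixel_values = torch.randn(1, 3, 640, 640)
#     outputs = model(pixel_values=pixel_values)
#     outputs.logits.shape      # (1, num_queries, num_labels)
#     outputs.pred_boxes.shape  # (1, num_queries, 4)
#
# Exact tensor sizes depend on the inherited RTDetrConfig defaults.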