Upload 8 files
- __init__.py +0 -0
- config.json +29 -0
- configuration_satdino.py +44 -0
- modeling_satdino.py +307 -0
- pytorch_model.bin +3 -0
- satdino-vit_small-8-finetune.pth +3 -0
- satdino-vit_small-8.pth +3 -0
- utils.py +85 -0
__init__.py
ADDED
File without changes
config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "architectures": [
+    "SatDINOModel"
+  ],
+  "attn_drop_rate": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_satdino.SatDINOConfig",
+    "AutoModel": "modeling_satdino.SatDINOModel"
+  },
+  "depth": 12,
+  "drop_path_rate": 0.0,
+  "drop_rate": 0.0,
+  "embed_dim": 384,
+  "img_size": [
+    224
+  ],
+  "in_chans": 3,
+  "mlp_ratio": 4,
+  "model_type": "satdino",
+  "norm_layer": 1e-06,
+  "num_classes": 0,
+  "num_heads": 6,
+  "patch_size": 8,
+  "pos_encoding_method": "learnable",
+  "qk_scale": null,
+  "qkv_bias": true,
+  "transformers_version": "4.51.2",
+  "use_xformers": false
+}
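The auto_map above registers the custom classes with the transformers auto classes, so the checkpoint can be loaded directly from the Hub. A minimal sketch (the repo id is a placeholder, not taken from this commit):

import torch
from transformers import AutoModel

# auto_map routes AutoModel to modeling_satdino.SatDINOModel, so the custom
# code must be explicitly trusted before it is executed.
model = AutoModel.from_pretrained("<repo-id>", trust_remote_code=True)

with torch.no_grad():
    features = model(torch.randn(1, 3, 224, 224))  # (1, 384) CLS embedding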
configuration_satdino.py
ADDED
@@ -0,0 +1,44 @@
+from transformers import PretrainedConfig
+
+
+class SatDINOConfig(PretrainedConfig):
+    model_type = "satdino"
+
+    def __init__(
+        self,
+        img_size=[224],
+        patch_size=16,
+        in_chans=3,
+        num_classes=0,
+        embed_dim=768,
+        depth=12,
+        num_heads=12,
+        mlp_ratio=4.,
+        qkv_bias=False,
+        qk_scale=None,
+        drop_rate=0.,
+        attn_drop_rate=0.,
+        drop_path_rate=0.,
+        norm_layer=1e-6,
+        use_xformers=False,
+        pos_encoding_method="learnable",
+        **kwargs
+    ):
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.in_chans = in_chans
+        self.num_classes = num_classes
+        self.embed_dim = embed_dim
+        self.depth = depth
+        self.num_heads = num_heads
+        self.mlp_ratio = mlp_ratio
+        self.qkv_bias = qkv_bias
+        self.qk_scale = qk_scale
+        self.drop_rate = drop_rate
+        self.attn_drop_rate = attn_drop_rate
+        self.drop_path_rate = drop_path_rate
+        self.norm_layer = norm_layer
+        self.use_xformers = use_xformers
+        self.pos_encoding_method = pos_encoding_method
+        super().__init__(**kwargs)
+
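For reference, the defaults above describe a ViT-B/16; the shipped config.json overrides them to the ViT-S/8 geometry of this checkpoint. A minimal sketch of building the same configuration in code (assuming configuration_satdino.py is importable locally):

from configuration_satdino import SatDINOConfig

config = SatDINOConfig(
    patch_size=8,     # 8x8 patches -> 28*28 = 784 patch tokens at 224 px
    embed_dim=384,    # ViT-small width
    num_heads=6,
    qkv_bias=True,
    norm_layer=1e-6,  # despite the name, this value is used as the LayerNorm eps
)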
modeling_satdino.py
ADDED
@@ -0,0 +1,307 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Mostly copy-paste from timm library.
+https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+"""
+import os
+import math
+from functools import partial
+
+import torch
+import torch.nn as nn
+
+from transformers import PreTrainedModel
+from .utils import trunc_normal_, get_1d_sincos_pos_embed
+from .configuration_satdino import SatDINOConfig
+
+try:
+    from xformers.helpers.timm_sparse_attention import TimmSparseAttention
+except:
+    TimmSparseAttention = None
+
+
+def drop_path(x, drop_prob: float = 0., training: bool = False):
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = 1 - drop_prob
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
+    random_tensor.floor_()  # binarize
+    output = x.div(keep_prob) * random_tensor
+    return output
+
+
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    """
+
+    def __init__(self, drop_prob=None):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training)
+
+
+class Mlp(nn.Module):
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x, attn
+
+
+class Block(nn.Module):
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_xformers=False):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+
+        if TimmSparseAttention is not None and use_xformers:
+            # print("Using xFormers attention.")
+            self.attn = TimmSparseAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
+                                            proj_drop=drop)
+        else:
+            # print("Using timm attention.")
+            self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
+                                  proj_drop=drop)
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+    def forward(self, x, return_attention=False):
+        attn_res = self.attn(self.norm1(x))
+        if not isinstance(attn_res, tuple):
+            attn_res = (attn_res, None)
+        y, attn = attn_res
+
+        if return_attention:
+            return attn
+        x = x + self.drop_path(y)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """ Image to Patch Embedding
+    """
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+        super().__init__()
+        num_patches = (img_size // patch_size) * (img_size // patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x):
+        B, C, H, W = x.shape
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        return x
+
+
+class SatDINOModel(PreTrainedModel):
+    """ Vision Transformer """
+    config_class = SatDINOConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_features = self.embed_dim = config.embed_dim
+        self.pos_encoding_method = config.pos_encoding_method
+
+        self.patch_embed = PatchEmbed(
+            img_size=config.img_size[0],
+            patch_size=config.patch_size,
+            in_chans=config.in_chans,
+            embed_dim=config.embed_dim
+        )
+        num_patches = self.patch_embed.num_patches
+        self.num_patches = num_patches
+
+        # cls token
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim))
+        trunc_normal_(self.cls_token, std=.02)
+        self.gsd_register = nn.Parameter(torch.zeros(1, 1, config.embed_dim))
+        trunc_normal_(self.gsd_register, std=.02)
+
+        # positional encoding
+        if config.pos_encoding_method == "learnable":
+            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, config.embed_dim))
+            trunc_normal_(self.pos_embed, std=.02)
+        elif config.pos_encoding_method == "sin_cos":
+            positions = torch.arange(num_patches + 2)
+            self.pos_embed = get_1d_sincos_pos_embed(config.embed_dim, positions).unsqueeze(0).cuda()
+
+        # define blocks
+        norm_layer = partial(nn.LayerNorm, eps=config.norm_layer)
+        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.depth)]  # stochastic depth decay rule
+        block_kwargs = {
+            "dim": config.embed_dim,
+            "num_heads": config.num_heads,
+            "mlp_ratio": config.mlp_ratio,
+            "qkv_bias": config.qkv_bias,
+            "qk_scale": config.qk_scale,
+            "drop": config.drop_rate,
+            "attn_drop": config.attn_drop_rate,
+            "norm_layer": norm_layer,
+            "use_xformers": config.use_xformers
+        }
+        self.blocks = nn.ModuleList([Block(drop_path=dpr[i], **block_kwargs) for i in range(config.depth)])
+
+        self.pos_drop = nn.Dropout(p=config.drop_rate)
+        self.norm = norm_layer(config.embed_dim)
+
+        # Classifier head
+        self.head = nn.Linear(config.embed_dim, config.num_classes) if config.num_classes > 0 else None
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def interpolate_pos_encoding(self, x, w, h):
+        npatch = x.shape[1] - 1
+        N = self.pos_embed.shape[1] - 1
+        if npatch == N and w == h:
+            return self.pos_embed
+        class_pos_embed = self.pos_embed[:, 0]
+        patch_pos_embed = self.pos_embed[:, 1:-1]
+        register_pos_embed = self.pos_embed[:, -1]
+        dim = x.shape[-1]
+        w0 = w // self.patch_embed.patch_size
+        h0 = h // self.patch_embed.patch_size
+        # we add a small number to avoid floating point error in the interpolation
+        # see discussion at https://github.com/facebookresearch/dino/issues/8
+        w0, h0 = w0 + 0.1, h0 + 0.1
+        patch_pos_embed = nn.functional.interpolate(
+            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
+            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
+            mode='bicubic',
+        )
+        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
+        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed, register_pos_embed.unsqueeze(0)), dim=1)
+
+    def prepare_tokens(self, x):
+        B, nc, w, h = x.shape
+        x = self.patch_embed(x)  # patch linear embedding
+
+        # add the [CLS] token to the embed patch tokens
+        cls_tokens = self.cls_token.expand(B, -1, -1)
+        gsd_register = self.gsd_register.expand(B, -1, -1)
+        x = torch.cat((cls_tokens, x, gsd_register), dim=1)
+
+        # add positional encoding to each token
+        x = x + self.interpolate_pos_encoding(x, w, h)
+
+        return self.pos_drop(x)
+
+    def forward(self, x, return_all=False, return_registers=False):
+        x = self.prepare_tokens(x)
+        for blk in self.blocks:
+            x = blk(x)
+        x = self.norm(x)
+
+        if return_all:
+            return x
+
+        if return_registers:
+            return x[:, 0], x[:, -1]
+
+        return x[:, 0]
+
+    def forward_intermediate_layers(self, x, return_all=False):
+        output = []
+        x = self.prepare_tokens(x)
+        for blk in self.blocks:
+            x = blk(x)
+            if return_all:
+                output.append(self.norm(x[:, :-1]))
+            else:
+                output.append(x[:, 0])
+
+        return output
+
+    def get_last_selfattention(self, x):
+        x = self.prepare_tokens(x)
+        for i, blk in enumerate(self.blocks):
+            if i < len(self.blocks) - 1:
+                x = blk(x)
+            else:
+                # return attention of the last block
+                return blk(x, return_attention=True)
+
+    def get_intermediate_layers(self, x, n=1):
+        x = self.prepare_tokens(x)
+        # we return the output tokens from the `n` last blocks
+        output = []
+        for i, blk in enumerate(self.blocks):
+            x = blk(x)
+            if len(self.blocks) - i <= n:
+                output.append(self.norm(x))
+        return output
+
+
+
+
+
+
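A usage sketch for the model class (assuming the files in this commit are importable as a package; inside the repo the imports are relative): the token sequence is [CLS] + patch tokens + GSD register, so forward() returns the CLS embedding by default and can also expose the register or all tokens.

import torch
from configuration_satdino import SatDINOConfig
from modeling_satdino import SatDINOModel

# ViT-S/8 geometry from config.json; the default "learnable" positional
# encoding keeps everything on CPU.
config = SatDINOConfig(img_size=[224], patch_size=8, embed_dim=384,
                       num_heads=6, qkv_bias=True)
model = SatDINOModel(config).eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    cls = model(x)                                  # (1, 384) CLS embedding
    cls, register = model(x, return_registers=True) # CLS plus GSD register
    tokens = model(x, return_all=True)              # (1, 1 + 784 + 1, 384)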
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a90d61c29e1e95b0eeeddd368a978fce29e5ed85c04a359a87790e68551d40d
+size 86732262
satdino-vit_small-8-finetune.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40f043a38d182f27295dd17315a4dc222743577f639b5fe0a9ff2d874e5dfdcb
+size 86840225
satdino-vit_small-8.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8994b4ebd293ff13be1cdd46f67900848f2f1918e77994c4b72368a0921ae3
+size 86831772
utils.py
ADDED
@@ -0,0 +1,85 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Misc functions.
+
+Mostly copy-paste from torchvision references or other public repos like DETR:
+https://github.com/facebookresearch/detr/blob/master/util/misc.py
+"""
+import torch
+import math
+import warnings
+
+
+def get_1d_sincos_pos_embed(embed_dim, pos, gsd=1, ref_gsd=1):
+    """
+    embed_dim: output dimension for each position
+    pos: a list of positions to be encoded: size (M,)
+    out: (M, D)
+    """
+    assert embed_dim % 2 == 0
+    omega = torch.arange(embed_dim // 2, dtype=torch.float, device=pos.device)
+    omega /= embed_dim / 2.
+    omega = 1. / 10000**omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = torch.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
+
+    emb_sin = torch.sin(gsd/ref_gsd * out)  # (M, D/2)
+    emb_cos = torch.cos(gsd/ref_gsd * out)  # (M, D/2)
+
+    emb = torch.zeros([len(pos), embed_dim])  # (M, D)
+    emb[:, 0::2] = emb_sin
+    emb[:, 1::2] = emb_cos
+
+    return emb.float()
+
+def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+    if (mean < a - 2 * std) or (mean > b + 2 * std):
+        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+                      "The distribution of values may be incorrect.",
+                      stacklevel=2)
+
+    with torch.no_grad():
+        # Values are generated by using a truncated uniform distribution and
+        # then using the inverse CDF for the normal distribution.
+        # Get upper and lower cdf values
+        l = norm_cdf((a - mean) / std)
+        u = norm_cdf((b - mean) / std)
+
+        # Uniformly fill tensor with values from [l, u], then translate to
+        # [2l-1, 2u-1].
+        tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+        # Use inverse cdf transform for normal distribution to get truncated
+        # standard normal
+        tensor.erfinv_()
+
+        # Transform to proper mean, std
+        tensor.mul_(std * math.sqrt(2.))
+        tensor.add_(mean)
+
+        # Clamp to ensure it's in the proper range
+        tensor.clamp_(min=a, max=b)
+        return tensor
+
+def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
+    # type: (Tensor, float, float, float, float) -> Tensor
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
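A small sketch of the GSD-aware encoding in get_1d_sincos_pos_embed (values below are illustrative): scaling the phase by gsd/ref_gsd stretches the sinusoid frequencies, so the same position index is encoded differently at different ground sample distances.

import torch
from utils import get_1d_sincos_pos_embed  # assumes utils.py is on the path

positions = torch.arange(10, dtype=torch.float)       # float avoids einsum dtype issues
emb_ref = get_1d_sincos_pos_embed(384, positions)     # gsd == ref_gsd
emb_coarse = get_1d_sincos_pos_embed(384, positions, gsd=2.0)

print(emb_ref.shape)                        # torch.Size([10, 384])
print(torch.allclose(emb_ref, emb_coarse))  # False: phase scales with GSD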