JH-C-k committed
Commit c20e049 · verified · 1 Parent(s): ec54512

Add files using upload-large-folder tool

__pycache__/transformer.cpython-310.pyc CHANGED
Binary files a/__pycache__/transformer.cpython-310.pyc and b/__pycache__/transformer.cpython-310.pyc differ
 
config.json CHANGED
@@ -24,71 +24,82 @@
   },
   "num_register_tokens": 1,
   "neuron_dict": {
+    "12": [
+      42,
+      983,
+      3868,
+      2687,
+      3008,
+      3002,
+      1571,
+      1816,
+      183,
+      2780,
+      2370,
+      3574
+    ],
+    "11": [
+      3784,
+      987,
+      3661,
+      1967,
+      9,
+      1100,
+      2555,
+      888,
+      3568
+    ],
+    "8": [
+      745,
+      3249,
+      2585,
+      1537,
+      200,
+      1603,
+      3523,
+      1851,
+      3697,
+      3137,
+      2563,
+      2293,
+      906,
+      730,
+      1564,
+      1528,
+      3348
+    ],
     "9": [
       815,
       4078,
       3618,
       2693,
       3973,
-      1744,
       1983,
+      1744,
       1157,
       1309,
       1335,
       2607,
-      2396,
       3049,
+      2396,
       1610,
       2621,
       2867,
+      2394,
       2012,
       1924,
-      2394,
+      3959,
       3097,
       3125,
-      3959,
+      3609,
       3210,
       2855,
-      3609,
-      526,
-      3362,
-      3395,
-      2626,
+      1372,
       503,
+      3222,
       2941,
-      3696,
-      1823,
-      2000,
-      129,
-      3667,
-      1372,
-      147,
-      1150,
-      852,
-      3222
-    ],
-    "8": [
-      745,
-      3249,
-      2585,
-      1537,
-      200,
-      1603,
-      1851,
-      3523,
-      3697,
-      3137,
-      2563,
-      2293,
-      730,
-      906,
-      1528,
-      3348,
-      2438,
-      1564,
-      1540,
-      3238,
-      3606
+      2626,
+      2000
     ],
     "10": [
       357,
@@ -103,40 +114,33 @@
       1903,
       738,
       1602,
-      1807,
       2018,
-      1281,
+      1807,
+      496,
       267,
       3539,
       1015,
-      496,
       693,
       2278,
-      7,
-      856,
-      2785,
-      2690,
-      1367
+      514
     ],
     "7": [
       3228,
       2550,
       2977,
-      3716,
-      2467
+      3716
     ],
-    "0": [
-      2890,
-      1779,
-      3761
+    "13": [
+      1109,
+      2541
     ],
     "6": [
       1042,
-      2315,
-      1674
+      2315
    ],
-    "3": [
-      410
+    "0": [
+      2890,
+      1779
     ]
   },
   "projection_dim": 768,
config_bak.json CHANGED
@@ -1,5 +1,9 @@
 {
   "model_type": "custom_clip_with_registers",
+
+  "processor_class": "CLIPProcessor",
+  "tokenizer_class": "CLIPTokenizerFast",
+
   "architectures": ["CustomCLIPModel"],
   "auto_map": {
     "AutoConfig": "modeling_custom_clip.CustomCLIPConfig",
@@ -19,91 +23,122 @@
     "max_position_embeddings": 77
   },
   "num_register_tokens": 1,
-  "neuron_dict": {"10": [2924,
-    2520,
-    2936,
-    675,
-    517,
-    1610,
-    88,
-    1950,
-    3098,
-    4082,
-    1237,
-    857,
-    3020,
-    1321,
-    1128,
-    3561,
-    4091,
-    69,
-    3378,
-    2304,
-    977,
-    1762,
-    3598,
-    371,
-    1097],
-  "9": [1253,
-    3658,
-    1827,
-    2600,
-    4000,
-    711,
-    2726,
-    615,
-    2654,
-    831,
-    1,
-    1387,
-    2178,
-    1967,
-    2413,
-    901,
-    481,
-    1514,
-    292,
-    692,
-    3094,
-    3470,
-    932,
-    2129],
-  "8": [3189,
-    1491,
-    2159,
-    1196,
-    1913,
-    1340,
-    2515,
-    2163,
-    955,
-    1496,
-    1891,
-    1410,
-    3725,
-    632,
-    188,
-    726,
-    1592,
-    1017,
-    1267,
-    995,
-    3465,
-    3510,
-    1494,
-    3467,
-    1896,
-    2779,
-    2309,
-    3389,
-    3682,
-    1968,
-    2904],
-  "7": [2226, 2565],
-  "6": [1450, 1551, 1024],
-  "5": [151, 1282],
-  "4": [2207],
-  "3": [2298, 2841]},
+  "neuron_dict": {
+    "9": [
+      815,
+      4078,
+      3618,
+      2693,
+      3973,
+      1744,
+      1983,
+      1157,
+      1309,
+      1335,
+      2607,
+      2396,
+      3049,
+      1610,
+      2621,
+      2867,
+      2012,
+      1924,
+      2394,
+      3097,
+      3125,
+      3959,
+      3210,
+      2855,
+      3609,
+      526,
+      3362,
+      3395,
+      2626,
+      503,
+      2941,
+      3696,
+      1823,
+      2000,
+      129,
+      3667,
+      1372,
+      147,
+      1150,
+      852,
+      3222
+    ],
+    "8": [
+      745,
+      3249,
+      2585,
+      1537,
+      200,
+      1603,
+      1851,
+      3523,
+      3697,
+      3137,
+      2563,
+      2293,
+      730,
+      906,
+      1528,
+      3348,
+      2438,
+      1564,
+      1540,
+      3238,
+      3606
+    ],
+    "10": [
+      357,
+      1654,
+      3940,
+      2319,
+      2560,
+      2559,
+      4009,
+      3029,
+      951,
+      1903,
+      738,
+      1602,
+      1807,
+      2018,
+      1281,
+      267,
+      3539,
+      1015,
+      496,
+      693,
+      2278,
+      7,
+      856,
+      2785,
+      2690,
+      1367
+    ],
+    "7": [
+      3228,
+      2550,
+      2977,
+      3716,
+      2467
+    ],
+    "0": [
+      2890,
+      1779,
+      3761
+    ],
+    "6": [
+      1042,
+      2315,
+      1674
+    ],
+    "3": [
+      410
+    ]
+  },
   "projection_dim": 768,
   "torch_dtype": "float32",
   "transformers_version": "4.21.0"
model_sanity_check.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7a812f61be88b4148e6c910ea245178ff3663263d54680cdb99dd6bcaed9b32
-size 1711950230
+oid sha256:be119e7aad05ffa9d4c8748c6097f165ab9014c63c42835a345a50f82d3c50e7
+size 933538166
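
pytorch_model.bin is tracked with Git LFS, so only the pointer file changes here: a new object hash and a size drop from 1,711,950,230 bytes to 933,538,166. If desired, the pulled weights can be checked against the pointer's sha256; a small sketch, assuming the file sits in the working directory:

```python
import hashlib

# Hash the downloaded weights in 1 MiB chunks and compare with the LFS pointer.
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

expected = "be119e7aad05ffa9d4c8748c6097f165ab9014c63c42835a345a50f82d3c50e7"
print("OK" if h.hexdigest() == expected else f"mismatch: {h.hexdigest()}")
```
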
transformer.py CHANGED
@@ -234,8 +234,9 @@ class MLP(nn.Module):
         x = self.c_fc(x)
 
         # If we have a dictionary of modifications and this layer is in it
-        if neuron_dict is not None and self.layer_id in neuron_dict and num_register_tokens>0:
-            neurons = neuron_dict[self.layer_id]
+        if neuron_dict is not None and str(self.layer_id) in neuron_dict and num_register_tokens>0:
+            # lol... hah, so that was it: the keys weren't being treated as str
+            neurons = neuron_dict[str(self.layer_id)]
 
         # Apply GELU to all activations
         x_after_gelu = self.gelu(x)
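
The actual fix is the str() around layer_id: neuron_dict is read from config.json, and json.load always yields string keys, so the old `self.layer_id in neuron_dict` test was silently False for every layer. A minimal repro of the bug and the corrected guard, with names mirroring transformer.py (the surrounding MLP code is elided):

```python
import json

# JSON object keys are always strings, which is exactly the bug this commit fixes.
neuron_dict = json.loads('{"9": [815, 4078, 3618], "8": [745, 3249]}')
layer_id = 9          # int, as on the module
num_register_tokens = 1

print(layer_id in neuron_dict)       # False: int 9 never matches key "9"
print(str(layer_id) in neuron_dict)  # True: the corrected lookup

# The guard as it now reads in MLP.forward:
if neuron_dict is not None and str(layer_id) in neuron_dict and num_register_tokens > 0:
    neurons = neuron_dict[str(layer_id)]
    print(f"layer {layer_id}: patching neurons {neurons}")
```

An alternative would be to normalize the keys once when the config is loaded, e.g. `neuron_dict = {int(k): v for k, v in neuron_dict.items()}`, which keeps the per-forward lookup on int keys.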