Sanchit Gandhi
committed on
Commit
·
9738fad
1
Parent(s):
0f637be
Create convert_scan_to_unrolled.py
Browse files- convert_scan_to_unrolled.py +47 -0
convert_scan_to_unrolled.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from transformers import SpeechEncoderDecoderModel, AutoConfig
from flax.traverse_util import flatten_dict, unflatten_dict
import collections

# Hub checkpoint to convert; its weights were presumably saved in the scanned
# (stacked per-layer) layout — TODO confirm against the checkpoint.
model_id = "sanchit-gandhi/flax-wav2vec2-2-bart-large-cv9-baseline-50k"

# Load the config and disable scan on both encoder and decoder so the target
# model is built with the unrolled (one module per layer) parameter layout.
config = AutoConfig.from_pretrained(model_id)
config.encoder.use_scan = config.decoder.use_scan = False

# Target model instantiated with the unrolled layout (scan disabled above).
unrolled_model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id, config=config)
# Source model loaded with the checkpoint's original (scanned) configuration.
model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id)
def scanned_to_unrolled(params):
    """Convert a scanned (stacked) parameter tree to the unrolled layout.

    Scanned checkpoints store all transformer layers of a module as a single
    set of leaves stacked along axis 0; the unrolled layout keys each layer
    individually by its index ('0', '1', ...). This splits the stacked leaves
    for the encoder and decoder and copies every non-scanned parameter over
    unchanged.

    Args:
        params: nested dict of model parameters in the scanned layout
            (as produced by ``FlaxSpeechEncoderDecoderModel`` with
            ``use_scan=True``).

    Returns:
        Nested dict of parameters in the unrolled layout, suitable for a
        model built with ``use_scan=False``.
    """
    # Unstack the encoder layers: each scanned leaf holds one slice per layer
    # along its leading axis.
    new_enc_params = {}
    for key, stacked_weights in flatten_dict(
        params['encoder']['encoder']['layers']['FlaxWav2Vec2EncoderLayers']
    ).items():
        for layer, weights in enumerate(stacked_weights):
            # Prefix the flat key with the layer index, e.g. ('3', 'attention', ...).
            new_enc_params[(str(layer),) + key] = weights

    new_enc_params = unflatten_dict({('encoder', 'layers'): unflatten_dict(new_enc_params)})

    # Repeat for the decoder (note that the key 'layers' appears one index to
    # the right of where it sits in the encoder, thus the encoder and decoder
    # are treated independently).
    new_dec_params = {}
    for key, stacked_weights in flatten_dict(
        params['decoder']['model']['decoder']['layers']['FlaxBartDecoderLayers']
    ).items():
        for layer, weights in enumerate(stacked_weights):
            new_dec_params[(str(layer),) + key] = weights

    new_dec_params = unflatten_dict({('model', 'decoder', 'layers'): unflatten_dict(new_dec_params)})

    # Combine the unrolled encoder and decoder parameters into one flat dict.
    new_params = flatten_dict({'encoder': new_enc_params, 'decoder': new_dec_params})

    # Append parameters for non-scanned modules (i.e. all modules whose key
    # path does not contain 'layers'; adapter modules are kept as well).
    # Perf fix: flatten the full tree ONCE — the original re-ran
    # flatten_dict(params) inside the loop for every key, which is O(n^2).
    flat_params = flatten_dict(params)
    for k, v in flat_params.items():
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = v

    return unflatten_dict(new_params)
|
44 |
+
|
45 |
+
# Convert the scanned checkpoint weights and install them on the unrolled
# model, then write the converted checkpoint to the current directory.
converted_params = scanned_to_unrolled(model.params)
unrolled_model.params = converted_params

unrolled_model.save_pretrained("./")