from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from transformers import AutoConfig, AutoFeatureExtractor, AutoTokenizer
from flax.traverse_util import flatten_dict, unflatten_dict
import collections

model_id = "sanchit-gandhi/flax-wav2vec2-2-bart-large-cv9-baseline-50k"

# disable scan in the config so the model is instantiated with unrolled (per-layer) modules
config = AutoConfig.from_pretrained(model_id)
config.encoder.use_scan = config.decoder.use_scan = False

# load an unrolled copy of the model (built from the modified config) and the original scanned model
unrolled_model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id, config=config)
model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id)

def scanned_to_unrolled(params):
    new_enc_params = collections.defaultdict(dict)
    # split each stacked encoder weight along its leading (layer) axis into per-layer entries
    for key, stacked_weights in flatten_dict(params['encoder']['encoder']['layers']['FlaxWav2Vec2EncoderLayers']).items():
        for layer, weights in enumerate(stacked_weights):
            new_key = (str(layer),) + key
            new_enc_params[new_key] = weights

    new_enc_params = unflatten_dict({('encoder', 'layers'): unflatten_dict(new_enc_params)})

    # repeat for the decoder (note that the 'layers' key sits one level deeper than in the
    # encoder, so the encoder and decoder are handled separately)
    new_dec_params = collections.defaultdict(dict)
    # split each stacked decoder weight along its leading (layer) axis into per-layer entries
    for key, stacked_weights in flatten_dict(params['decoder']['model']['decoder']['layers']['FlaxBartDecoderLayers']).items():
        for layer, weights in enumerate(stacked_weights):
            new_key = (str(layer),) + key
            new_dec_params[new_key] = weights

    new_dec_params = unflatten_dict({('model', 'decoder', 'layers'): unflatten_dict(new_dec_params)})

    # combine the encoder and decoder parameters
    new_params = {'encoder': new_enc_params, 'decoder': new_dec_params}
    new_params = flatten_dict(new_params)

    # copy over the parameters of all non-scanned modules, i.e. anything whose key path does
    # not contain 'layers' (plus the adapter, whose 'layers' key is not a scanned module)
    flat_params = flatten_dict(params)
    for k, v in flat_params.items():
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = v

    return unflatten_dict(new_params)

# replace the unrolled model's parameters with the converted scanned parameters
unrolled_model.params = scanned_to_unrolled(model.params)
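
# optional sanity check (a minimal extra check; assumes every parameter leaf is array-like):
# the conversion should only re-nest weights, never add or drop values, so the total number
# of parameter elements must match between the scanned and unrolled trees
import numpy as np

scanned_numel = sum(np.asarray(w).size for w in flatten_dict(model.params).values())
unrolled_numel = sum(np.asarray(w).size for w in flatten_dict(unrolled_model.params).values())
assert scanned_numel == unrolled_numel, "parameter count changed during conversion"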

unrolled_model.save_pretrained("./")

feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.save_pretrained("./")
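
# --- optional equivalence check (a minimal sketch, not part of the conversion itself; it
# assumes the custom FlaxSpeechEncoderDecoderModel follows the call signature of the
# transformers version, i.e. raw audio `inputs` plus `decoder_input_ids`, and that the
# decoder config defines `decoder_start_token_id`) ---
import numpy as np

rng = np.random.default_rng(0)
dummy_audio = rng.standard_normal((1, 16000), dtype=np.float32)
dummy_decoder_ids = np.array([[config.decoder.decoder_start_token_id]], dtype="i4")

# the scanned and unrolled models should produce (numerically) identical logits
scanned_logits = model(dummy_audio, decoder_input_ids=dummy_decoder_ids).logits
unrolled_logits = unrolled_model(dummy_audio, decoder_input_ids=dummy_decoder_ids).logits
np.testing.assert_allclose(
    np.asarray(scanned_logits),
    np.asarray(unrolled_logits),
    atol=1e-5,
    err_msg="scanned and unrolled models disagree on a dummy forward pass",
)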