rohitp1 committed on
Commit
268ecb8
·
1 Parent(s): 489ff8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -36,15 +36,15 @@ feat_ext3 = WhisperFeatureExtractor.from_pretrained(M3, use_auth_token=auth_toke
36
 
37
 
38
  # make quantized model
39
- quantized_model1 = torch.quantization.quantize_dynamic(
40
- model1, {torch.nn.Linear}, dtype=torch.qint8
41
- )
42
 
43
 
44
  p1 = pipeline('automatic-speech-recognition', model=model1, tokenizer=tokenizer1, feature_extractor=feat_ext1)
45
  p2 = pipeline('automatic-speech-recognition', model=model2, tokenizer=tokenizer2, feature_extractor=feat_ext2)
46
  p3 = pipeline('automatic-speech-recognition', model=model3, tokenizer=tokenizer3, feature_extractor=feat_ext3)
47
- p1_quant = pipeline('automatic-speech-recognition', model=quantized_model1, tokenizer=tokenizer1, feature_extractor=feat_ext1)
48
 
49
  def transcribe(mic_input, upl_input, model_type):
50
  if mic_input:
@@ -57,8 +57,8 @@ def transcribe(mic_input, upl_input, model_type):
57
  text = p2(audio)["text"]
58
  elif model_type == 'CleanFinetuned':
59
  text = p3(audio)["text"]
60
- elif model_type == 'DistilledAndQuantised':
61
- text = p1_quant(audio)['text']
62
  else:
63
  text = p1(audio)["text"]
64
  end_time = time.time()
@@ -115,7 +115,7 @@ if __name__ == "__main__":
115
  )
116
 
117
  with gr.Row():
118
- model_type = gr.inputs.Dropdown(["RobustDistillation", "NoisyFinetuned", "CleanFinetuned", "DistilledAndQuantised"], label='Model Type')
119
 
120
  with gr.Row():
121
  clr_btn = gr.Button(value="Clear", variant="secondary")
 
36
 
37
 
38
  # make quantized model
39
+ # quantized_model1 = torch.quantization.quantize_dynamic(
40
+ # model1, {torch.nn.Linear}, dtype=torch.qint8
41
+ # )
42
 
43
 
44
  p1 = pipeline('automatic-speech-recognition', model=model1, tokenizer=tokenizer1, feature_extractor=feat_ext1)
45
  p2 = pipeline('automatic-speech-recognition', model=model2, tokenizer=tokenizer2, feature_extractor=feat_ext2)
46
  p3 = pipeline('automatic-speech-recognition', model=model3, tokenizer=tokenizer3, feature_extractor=feat_ext3)
47
+ # p1_quant = pipeline('automatic-speech-recognition', model=quantized_model1, tokenizer=tokenizer1, feature_extractor=feat_ext1)
48
 
49
  def transcribe(mic_input, upl_input, model_type):
50
  if mic_input:
 
57
  text = p2(audio)["text"]
58
  elif model_type == 'CleanFinetuned':
59
  text = p3(audio)["text"]
60
+ # elif model_type == 'DistilledAndQuantised':
61
+ # text = p1_quant(audio)['text']
62
  else:
63
  text = p1(audio)["text"]
64
  end_time = time.time()
 
115
  )
116
 
117
  with gr.Row():
118
+ model_type = gr.inputs.Dropdown(["RobustDistillation", "NoisyFinetuned", "CleanFinetuned"], label='Model Type')
119
 
120
  with gr.Row():
121
  clr_btn = gr.Button(value="Clear", variant="secondary")