Loren committed
Commit da70720 · verified · 1 Parent(s): b8ccc65

Update app.py

Files changed (1): app.py (+9, -10)
app.py CHANGED
@@ -32,17 +32,16 @@ def process_transcript(model, processor, language, audio_path):
     if audio_path is None:
         print("***** audio_path VIDE")
         raise gr.Error("Please provide some input audio: either upload an audio file or use the microphone")
+        return "Please upload an audio file."
     else:
-        print("***** audio_path NON VIDE")
-        print("audio_path:", audio_path)
-        id_language = dict_languages[language]
-        inputs = processor.apply_transcrition_request(language=id_language, audio=audio_path, model_id=model_name)
-        inputs = inputs.to(device, dtype=torch.bfloat16)
-
-        outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
-        decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
-
-        return decoded_outputs[0]
+        id_language = dict_languages[language]
+        inputs = processor.apply_transcrition_request(language=id_language, audio=audio_path, model_id=model_name)
+        inputs = inputs.to(device, dtype=torch.bfloat16)
+
+        outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
+        decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
+
+        return decoded_outputs[0]
 
 
 
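Note for readers of the diff: process_transcript() relies on globals defined elsewhere in app.py (model, processor, device, MAX_TOKENS, dict_languages, model_name) that this commit does not touch. Below is a minimal sketch of how those pieces might be wired up, assuming a Voxtral-style checkpoint loaded through transformers; the checkpoint name, language map, and token limit are illustrative assumptions, not taken from the commit. Only the processor/model calls mirror the diff above.

# Sketch only: assumed setup for the globals used by process_transcript().
import torch
from transformers import AutoProcessor, VoxtralForConditionalGeneration

model_name = "mistralai/Voxtral-Mini-3B-2507"        # assumed checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"
MAX_TOKENS = 500                                     # assumed limit
dict_languages = {"English": "en", "French": "fr"}   # assumed UI label -> language id map

processor = AutoProcessor.from_pretrained(model_name)
model = VoxtralForConditionalGeneration.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map=device
)

# With those in place, the updated function transcribes a local audio file:
# text = process_transcript(model, processor, "English", "sample.wav")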