import gradio as gr
import torch
import whisper
from transformers import pipeline
### ----------------------------------------
title = "Whisper to Emotion"
### ----------------------------------------
whisper_model = whisper.load_model("medium")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
emotion_classifier = pipeline("text-classification", model='bhadresh-savani/distilbert-base-uncased-emotion')
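# Illustrative note (not part of the original app): this checkpoint is a DistilBERT model
# fine-tuned for single-label emotion classification, and the pipeline returns a list of
# {"label", "score"} dicts; its labels are typically sadness, joy, love, anger, fear and
# surprise. A quick sanity check might look like this (the score shown is made up):
#
#     emotion_classifier("I am so happy this finally works!")
#     # -> [{'label': 'joy', 'score': 0.99}]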
def translate_and_classify(audio):
    print("Sending audio to Whisper ...")

    # Load the recording and fit it to Whisper's expected 30-second window
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Detect the spoken language (the probability dict is computed but not used further)
    _, probs = whisper_model.detect_language(mel)

    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # Classify emotion on the English translation so one English-only classifier
    # covers every input language
    emotion = emotion_classifier(translation.text)
    detected_emotion = emotion[0]["label"]
    print("Detected emotion:", detected_emotion)
    return transcription.text, detected_emotion
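# A minimal sketch of exercising the function outside the Gradio UI, assuming a local
# audio file (the path below is a placeholder, not part of the original Space):
#
#     text, emotion = translate_and_classify("sample.wav")
#     print(text, emotion)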
css = """
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: black;
    background: black;
}
input[type='range'] {
    accent-color: black;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 730px;
    margin: auto;
    padding-top: 1.5rem;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-btn {
    font-size: .7rem !important;
    line-height: 19px;
    margin-top: 12px;
    margin-bottom: 12px;
    padding: 2px 8px;
    border-radius: 14px !important;
}
#advanced-options {
    display: none;
    margin-bottom: 20px;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown("""
    ## Emotion Detection From Speech with Whisper
    """)
    gr.HTML('''
    <p style="margin-bottom: 10px; font-size: 94%">
    Whisper is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification. Combined with an emotion detection model, this allows for detecting emotion directly from speech in multiple languages and can potentially be used to analyze sentiment from customer calls.
    </p>
    ''')
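    # End-to-end flow (summary comment, not original code): the audio file path produced by
    # Gradio is loaded by Whisper, converted to a log-mel spectrogram, transcribed in the
    # source language and translated to English; the English translation is then fed to the
    # DistilBERT emotion classifier, and the UI displays the transcript plus the top emotion.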
    with gr.Column():
        # gr.Markdown(""" ### Record audio """)
        with gr.Tab("Record Audio"):
            audio_input_r = gr.Audio(label='Record Audio Input', source="microphone", type="filepath")
            transcribe_audio_r = gr.Button('Transcribe')
        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(label='Upload Audio', source="upload", type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')
        with gr.Row():
            transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines=3)
            emotion_output = gr.Textbox(label="Detected Emotion")

    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output])
    gr.HTML('''
    <div class="footer">
        <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> -
        <a href="https://huggingface.co/bhadresh-savani/distilbert-base-uncased-emotion" style="text-decoration: underline;" target="_blank">Emotion Detection Model</a>
        </p>
    </div>
    ''')

demo.launch()
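# Note (not in the original Space): on Hugging Face Spaces launch() is called without
# arguments; when running locally, demo.launch(share=True) would additionally expose a
# temporary public Gradio link for quick sharing.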