Spaces: Build error

Update app.py

app.py CHANGED

@@ -1,4 +1,5 @@
 import streamlit as st
+import time
 from transformers import pipeline
 from pytube import YouTube
 from pydub import AudioSegment
@@ -37,7 +38,7 @@ def audio_extraction(video_file, output_format):
         input_path=os.fspath(video_file), output_path=f"{str(video_file)[:-4]}.mp3", output_format=f"{output_format}"
     )
     return audio
-
+
 
 def audio_processing(mp3_audio):
     audio = AudioSegment.from_file(mp3_audio, format="mp3")
@@ -52,9 +53,12 @@ def load_asr_model():
     return asr_model
 
 def transcribe_video(processed_audio):
+    st = time.now()
     transcriber_model = load_asr_model()
     text_extract = transcriber_model(processed_audio)
-
+    et = time.now()
+    run_time = et - st
+    return text_extract['text'], run_time
 
 def generate_ai_summary(transcript):
     model = google_genai.GenerativeModel('gemini-pro')
@@ -76,11 +80,13 @@ with youtube_url_tab:
     if url:
         if st.button("Transcribe", key="yturl"):
             with st.spinner("Transcribing..."):
-                audio = audio_extraction(yt_video, "mp3")
+                audio = audio_extraction(os.fspath(yt_video), "mp3")
                 audio = audio_processing(audio)
-                ytvideo_transcript = transcribe_video(audio)
+                ytvideo_transcript, run_time = transcribe_video(audio)
                 st.success(f"Transcription successful")
                 st.write(ytvideo_transcript)
+                st.write(f'Completed in {run_time}')
+
         if st.button("Generate Summary"):
             summary = generate_ai_summary(ytvideo_transcript)
             st.write(summary)
@@ -102,12 +108,15 @@ with file_select_tab:
                 with st.spinner("Transcribing..."):
                     audio = audio_extraction(video_file, "mp3")
                     audio = audio_processing(audio)
-                    video_transcript = transcribe_video(audio)
+                    video_transcript, run_time = transcribe_video(audio)
                     st.success(f"Transcription successful")
                     st.write(video_transcript)
+                    st.write(f'Completed in {run_time}')
+
                 if st.button("Generate Summary", key="ti2"):
                     summary = generate_ai_summary(video_transcript)
                     st.write(summary)
+
 
     except Exception as e:
         st.error(e)
@@ -120,9 +129,10 @@ with audio_file_tab:
         if st.button("Transcribe", key="audiofile"):
            with st.spinner("Transcribing..."):
                processed_audio = audio_processing(audio_file)
-                audio_transcript = transcribe_video(processed_audio)
+                audio_transcript, run_time = transcribe_video(processed_audio)
                st.success(f"Transcription successful")
                st.write(audio_transcript)
+                st.write(f'Completed in {run_time}')
 
 
        if st.button("Generate Summary", key="ti1"):
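A note on the timing change above: Python's standard time module has no now() function (that name belongs to datetime.datetime), so st = time.now() raises an AttributeError as soon as transcribe_video runs; it also reuses st, the module-level alias for streamlit, as a local variable name. Below is a minimal sketch of the intended timing logic, not the committed code, assuming load_asr_model() returns a transformers ASR pipeline whose output is a dict with a "text" key:

    import time

    def transcribe_video(processed_audio):
        # Sketch only: time the transcription with perf_counter()
        start = time.perf_counter()
        transcriber_model = load_asr_model()               # assumed: returns an ASR pipeline
        text_extract = transcriber_model(processed_audio)  # pipeline output: {"text": "..."}
        run_time = time.perf_counter() - start             # elapsed wall-clock seconds (float)
        return text_extract["text"], run_time

With a float of seconds, the display lines added in this commit (st.write(f'Completed in {run_time}')) print a bare number; formatting it, for example f'Completed in {run_time:.1f} s', is a small optional follow-up.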
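For context on why the new return statement indexes text_extract['text']: an automatic-speech-recognition pipeline from transformers (imported at the top of app.py) returns a dict with a "text" key. The snippet below is illustrative only; the model name is a placeholder and need not match whatever load_asr_model() actually loads in this Space, and running it locally requires ffmpeg for audio decoding.

    from transformers import pipeline

    # Illustrative ASR pipeline; "openai/whisper-tiny" is a placeholder model,
    # not necessarily the one this Space uses.
    asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
    result = asr("sample.mp3")   # accepts a path to an audio file
    print(result["text"])        # the "text" key is what transcribe_video now returns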