Commit b6ab738 (verified)
Parent(s): none
initial commit
Files changed:
- .gitattributes +35 -0
- README.md +13 -0
- app.py +92 -0
- requirements.txt +0 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Test Gpt Omni
+emoji: ⚡
+colorFrom: gray
+colorTo: yellow
+sdk: gradio
+sdk_version: 5.0.2
+app_file: app.py
+pinned: false
+short_description: Experimenting with multimodal models and Gradio 5
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,92 @@
+import gradio as gr
+import numpy as np
+import io
+import tempfile
+from pydub import AudioSegment
+from dataclasses import dataclass, field
+
+@dataclass
+class AppState:
+    stream: np.ndarray | None = None
+    sampling_rate: int = 0
+    pause_detected: bool = False
+    stopped: bool = False
+    started_talking: bool = False
+    conversation: list = field(default_factory=list)  # Use default_factory for mutable defaults
+
+
+# Function to process audio input and detect pauses
+def process_audio(audio: tuple, state: AppState):
+    if state.stream is None:
+        state.stream = audio[1]
+        state.sampling_rate = audio[0]
+    else:
+        state.stream = np.concatenate((state.stream, audio[1]))
+
+    # Custom pause detection logic (replace with actual implementation)
+    pause_detected = len(state.stream) > state.sampling_rate * 1  # Example: 1-sec pause
+    state.pause_detected = pause_detected
+
+    if state.pause_detected:
+        return gr.Audio(recording=False), state  # Stop recording
+    return None, state
+
+# Generate chatbot response from user audio input
+def response(state: AppState):
+    if not state.pause_detected:
+        yield None, state  # generator handler: yield, not return, so outputs still update
+        return
+
+    # Convert user audio to WAV format
+    audio_buffer = io.BytesIO()
+    segment = AudioSegment(
+        state.stream.tobytes(),
+        frame_rate=state.sampling_rate,
+        sample_width=state.stream.dtype.itemsize,
+        channels=1 if len(state.stream.shape) == 1 else state.stream.shape[1]
+    )
+    segment.export(audio_buffer, format="wav")
+
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
+        f.write(audio_buffer.getvalue())
+    state.conversation.append({"role": "user", "content": {"path": f.name, "mime_type": "audio/wav"}})
+
+    # Simulate chatbot's response (replace with mini omni model logic)
+    chatbot_response = b"Simulated response audio content"  # Placeholder
+    output_buffer = chatbot_response  # Stream actual chatbot response here
+
+    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as f:
+        f.write(output_buffer)
+    state.conversation.append({"role": "assistant", "content": {"path": f.name, "mime_type": "audio/mp3"}})
+
+    yield None, state
+
+# --- Gradio Interface ---
+
+def start_recording_user(state: AppState):
+    if not state.stopped:
+        return gr.Audio(recording=True)
+
+# Build Gradio app using Blocks API
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            input_audio = gr.Audio(label="Input Audio", sources="microphone", type="numpy")
+        with gr.Column():
+            chatbot = gr.Chatbot(label="Conversation", type="messages")
+            output_audio = gr.Audio(label="Output Audio", streaming=True, autoplay=True)
+
+    state = gr.State(value=AppState())
+
+    stream = input_audio.stream(
+        process_audio, [input_audio, state], [input_audio, state], stream_every=0.5, time_limit=30
+    )
+    respond = input_audio.stop_recording(response, [state], [output_audio, state])
+    respond.then(lambda s: s.conversation, [state], [chatbot])

+    restart = output_audio.stop(start_recording_user, [state], [input_audio])
+    cancel = gr.Button("Stop Conversation", variant="stop")
+    cancel.click(lambda: (AppState(stopped=True), gr.Audio(recording=False)), None, [state, input_audio], cancels=[respond, restart])
+
+if __name__ == "__main__":
+    demo.launch()
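The pause logic in process_audio is explicitly a placeholder: it reports a "pause" as soon as more than one second of audio has accumulated, regardless of content. A minimal energy-based alternative is sketched below; detect_pause, its window length, and its RMS threshold are illustrative assumptions, not part of this commit.

```python
import numpy as np

def detect_pause(stream: np.ndarray, sampling_rate: int,
                 window_s: float = 1.0, rms_threshold: float = 500.0) -> bool:
    """Treat a near-silent trailing window of int16 samples as a pause.

    window_s and rms_threshold are illustrative defaults, not tuned values.
    """
    n = int(window_s * sampling_rate)
    if len(stream) < n:
        return False  # not enough audio accumulated to judge yet
    tail = stream[-n:].astype(np.float64)
    rms = np.sqrt(np.mean(tail ** 2))  # root-mean-square energy of the tail
    return rms < rms_threshold
```

Similarly, response only writes a fixed placeholder byte string; the "Stream actual chatbot response here" comment points at yielding audio incrementally instead. Since output_audio is created with streaming=True, the generator could yield successive chunks as the model produces them. The omni_model object below is hypothetical:

```python
def response(state: AppState):
    if not state.pause_detected:
        yield None, state
        return
    # omni_model is a stand-in for whatever model actually answers;
    # each chunk is assumed to be an int16 numpy array at the input rate.
    for chunk in omni_model.stream_reply(state.stream, state.sampling_rate):
        yield (state.sampling_rate, chunk), state
```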
requirements.txt
ADDED
File without changes (empty file)
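One loose end worth noting: requirements.txt is committed empty, yet app.py imports pydub, which the Gradio Space image does not install by default (gradio itself comes from the sdk/sdk_version fields in README.md, and numpy ships as one of its dependencies). A plausible minimal file, to be confirmed against the Space's build logs, would be:

```
pydub
numpy
```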