Arnavkumar01 committed on
Commit 53354e8 · 1 Parent(s): 1a15b05

I am still here. The change: we removed the `model` argument (and its mention in the docstring) because it is not supported with multilingual v2.

Files changed (1)
  1. main.py +3 -4
main.py CHANGED
@@ -179,8 +179,8 @@ def transcribe_audio(audio_path: str, audio_bytes: bytes) -> str:
 
 def generate_elevenlabs_sync(text: str) -> bytes:
     """
-    Uses the **hard-coded voice ID** and the correct SDK method
-    `client.text_to_speech.convert`.
+    Uses the hard-coded voice ID and the correct SDK method.
+    NOTE: `model` parameter is REMOVED in SDK v2.17.0+
     """
     if client_elevenlabs is None:
         logging.error("ElevenLabs client not initialized – skipping TTS.")
@@ -192,10 +192,9 @@ def generate_elevenlabs_sync(text: str) -> bytes:
     stream = client_elevenlabs.text_to_speech.convert(
         voice_id=ELEVENLABS_VOICE_ID,
         text=text,
-        model="eleven_multilingual_v2",
         output_format="mp3_44100_128",
+        # model="eleven_multilingual_v2"  ← REMOVED
     )
-    # The SDK returns a generator of bytes – collect everything
     audio_bytes = b""
     for chunk in stream:
         if chunk:
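
For reference, a minimal sketch of how `generate_elevenlabs_sync` might read after this commit, assembled from the two hunks above. The module-level names `client_elevenlabs` and `ELEVENLABS_VOICE_ID` are taken from the snippet; the `return b""` fallback and the loop body/return at the end are assumptions filled in for illustration, since the diff cuts off at `if chunk:`.

import logging

# Assumed module-level objects (defined elsewhere in main.py, outside this diff):
# client_elevenlabs   – ElevenLabs client instance, or None if initialization failed
# ELEVENLABS_VOICE_ID – hard-coded voice ID string

def generate_elevenlabs_sync(text: str) -> bytes:
    """
    Uses the hard-coded voice ID and the correct SDK method.
    NOTE: `model` parameter is REMOVED in SDK v2.17.0+
    """
    if client_elevenlabs is None:
        logging.error("ElevenLabs client not initialized – skipping TTS.")
        return b""  # assumed fallback; the real handling sits outside the diff

    stream = client_elevenlabs.text_to_speech.convert(
        voice_id=ELEVENLABS_VOICE_ID,
        text=text,
        output_format="mp3_44100_128",
        # model="eleven_multilingual_v2"  ← REMOVED
    )
    # The convert() call yields byte chunks – collect them into one buffer
    audio_bytes = b""
    for chunk in stream:
        if chunk:
            audio_bytes += chunk  # assumed loop body (the diff cuts off here)
    return audio_bytes            # assumed return

A caller could then write the result straight to a file, e.g. open("reply.mp3", "wb").write(generate_elevenlabs_sync("Hello")).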