Update app.py
app.py CHANGED
@@ -4,293 +4,73 @@ import json
 import torch
 import wavio
 from tqdm import tqdm
-
-from models import AudioDiffusion, DDPMScheduler
-from audioldm.audio.stft import TacotronSTFT
-from audioldm.variational_autoencoder import AutoencoderKL
+from huggingface_hub import snapshot_download
 from pydub import AudioSegment
 from gradio import Markdown
+import uuid
 import torch
-from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers import DiffusionPipeline,AudioPipelineOutput
 from transformers import CLIPTextModel, T5EncoderModel, AutoModel, T5Tokenizer, T5TokenizerFast
 from typing import Union
 from diffusers.utils.torch_utils import randn_tensor
 from tqdm import tqdm
-from huggingface_hub import snapshot_download
-
-from transformers import pipeline
-translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+from TangoFlux import TangoFluxInference
+import torchaudio
 
-class Tango2Pipeline(DiffusionPipeline):
-    def __init__(
-        self,
-        vae: AutoencoderKL,
-        text_encoder: T5EncoderModel,
-        tokenizer: Union[T5Tokenizer, T5TokenizerFast],
-        unet: UNet2DConditionModel,
-        scheduler: DDPMScheduler
-    ):
-
-        super().__init__()
-
-        self.register_modules(vae=vae,
-                              text_encoder=text_encoder,
-                              tokenizer=tokenizer,
-                              unet=unet,
-                              scheduler=scheduler
-                              )
-
-    def _encode_prompt(self, prompt):
-        device = self.text_encoder.device
-
-        batch = self.tokenizer(
-            prompt, max_length=self.tokenizer.model_max_length, padding=True, truncation=True, return_tensors="pt"
-        )
-        input_ids, attention_mask = batch.input_ids.to(device), batch.attention_mask.to(device)
-
-        encoder_hidden_states = self.text_encoder(
-            input_ids=input_ids, attention_mask=attention_mask
-        )[0]
-
-        boolean_encoder_mask = (attention_mask == 1).to(device)
-
-        return encoder_hidden_states, boolean_encoder_mask
-
-    def _encode_text_classifier_free(self, prompt, num_samples_per_prompt):
-        device = self.text_encoder.device
-        batch = self.tokenizer(
-            prompt, max_length=self.tokenizer.model_max_length, padding=True, truncation=True, return_tensors="pt"
-        )
-        input_ids, attention_mask = batch.input_ids.to(device), batch.attention_mask.to(device)
-
-        with torch.no_grad():
-            prompt_embeds = self.text_encoder(
-                input_ids=input_ids, attention_mask=attention_mask
-            )[0]
-
-        prompt_embeds = prompt_embeds.repeat_interleave(num_samples_per_prompt, 0)
-        attention_mask = attention_mask.repeat_interleave(num_samples_per_prompt, 0)
-
-        # get unconditional embeddings for classifier free guidance
-        uncond_tokens = [""] * len(prompt)
-
-        max_length = input_ids.shape[-1]
-        uncond_batch = self.tokenizer(
-            uncond_tokens, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt",
-        )
-        uncond_input_ids = uncond_batch.input_ids.to(device)
-        uncond_attention_mask = uncond_batch.attention_mask.to(device)
-
-        with torch.no_grad():
-            negative_prompt_embeds = self.text_encoder(
-                input_ids=uncond_input_ids, attention_mask=uncond_attention_mask
-            )[0]
-
-        negative_prompt_embeds = negative_prompt_embeds.repeat_interleave(num_samples_per_prompt, 0)
-        uncond_attention_mask = uncond_attention_mask.repeat_interleave(num_samples_per_prompt, 0)
-
-        # For classifier free guidance, we need to do two forward passes.
-        # We concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes
-        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-        prompt_mask = torch.cat([uncond_attention_mask, attention_mask])
-        boolean_prompt_mask = (prompt_mask == 1).to(device)
-
-        return prompt_embeds, boolean_prompt_mask
-
-    def prepare_latents(self, batch_size, inference_scheduler, num_channels_latents, dtype, device):
-        shape = (batch_size, num_channels_latents, 256, 16)
-        latents = randn_tensor(shape, generator=None, device=device, dtype=dtype)
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * inference_scheduler.init_noise_sigma
-        return latents
-
-    @torch.no_grad()
-    def inference(self, prompt, inference_scheduler, num_steps=20, guidance_scale=3, num_samples_per_prompt=1,
-                  disable_progress=True):
-        device = self.text_encoder.device
-        classifier_free_guidance = guidance_scale > 1.0
-        batch_size = len(prompt) * num_samples_per_prompt
-
-        if classifier_free_guidance:
-            prompt_embeds, boolean_prompt_mask = self._encode_text_classifier_free(prompt, num_samples_per_prompt)
-        else:
-            prompt_embeds, boolean_prompt_mask = self._encode_prompt(prompt)
-            prompt_embeds = prompt_embeds.repeat_interleave(num_samples_per_prompt, 0)
-            boolean_prompt_mask = boolean_prompt_mask.repeat_interleave(num_samples_per_prompt, 0)
-
-        inference_scheduler.set_timesteps(num_steps, device=device)
-        timesteps = inference_scheduler.timesteps
-
-        num_channels_latents = self.unet.config.in_channels
-        latents = self.prepare_latents(batch_size, inference_scheduler, num_channels_latents, prompt_embeds.dtype, device)
-
-        num_warmup_steps = len(timesteps) - num_steps * inference_scheduler.order
-        progress_bar = tqdm(range(num_steps), disable=disable_progress)
-
-        for i, t in enumerate(timesteps):
-            # expand the latents if we are doing classifier free guidance
-            latent_model_input = torch.cat([latents] * 2) if classifier_free_guidance else latents
-            latent_model_input = inference_scheduler.scale_model_input(latent_model_input, t)
-
-            noise_pred = self.unet(
-                latent_model_input, t, encoder_hidden_states=prompt_embeds,
-                encoder_attention_mask=boolean_prompt_mask
-            ).sample
-
-            # perform guidance
-            if classifier_free_guidance:
-                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-            # compute the previous noisy sample x_t -> x_t-1
-            latents = inference_scheduler.step(noise_pred, t, latents).prev_sample
-
-            # call the callback, if provided
-            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % inference_scheduler.order == 0):
-                progress_bar.update(1)
-
-        return latents
-
-    @torch.no_grad()
-    def __call__(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
-        """ Generate audio for a single prompt string. """
-        with torch.no_grad():
-            latents = self.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
-            mel = self.vae.decode_first_stage(latents)
-            wave = self.vae.decode_to_waveform(mel)
-
-        return AudioPipelineOutput(audios=wave)
-
-
-# Automatic device detection
-if torch.cuda.is_available():
-    device_type = "cuda"
-    device_selection = "cuda:0"
-else:
-    device_type = "cpu"
-    device_selection = "cpu"
-
-class Tango:
-    def __init__(self, name="declare-lab/tango2", device=device_selection):
-
-        path = snapshot_download(repo_id=name)
-
-        vae_config = json.load(open("{}/vae_config.json".format(path)))
-        stft_config = json.load(open("{}/stft_config.json".format(path)))
-        main_config = json.load(open("{}/main_config.json".format(path)))
-
-        self.vae = AutoencoderKL(**vae_config).to(device)
-        self.stft = TacotronSTFT(**stft_config).to(device)
-        self.model = AudioDiffusion(**main_config).to(device)
-
-        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
-        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
-        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)
-
-        self.vae.load_state_dict(vae_weights)
-        self.stft.load_state_dict(stft_weights)
-        self.model.load_state_dict(main_weights)
-
-        print("Successfully loaded checkpoint from:", name)
-
-        self.vae.eval()
-        self.stft.eval()
-        self.model.eval()
-
-        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")
-
-    def chunks(self, lst, n):
-        """ Yield successive n-sized chunks from a list. """
-        for i in range(0, len(lst), n):
-            yield lst[i:i + n]
-
-    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
-        """ Generate audio for a single prompt string. """
-        with torch.no_grad():
-            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
-            mel = self.vae.decode_first_stage(latents)
-            wave = self.vae.decode_to_waveform(mel)
-        return wave[0]
 
-    def generate_for_batch(self, prompts, steps=100, guidance=3, samples=1, batch_size=8, disable_progress=True):
-        """ Generate audio for a list of prompt strings. """
-        outputs = []
-        for k in tqdm(range(0, len(prompts), batch_size)):
-            batch = prompts[k: k+batch_size]
-            with torch.no_grad():
-                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
-                mel = self.vae.decode_first_stage(latents)
-                wave = self.vae.decode_to_waveform(mel)
-                outputs += [item for item in wave]
-        if samples == 1:
-            return outputs
-        else:
-            return list(self.chunks(outputs, samples))
 
-#
 
-tango = Tango(device="cpu")
-tango.vae.to(device_type)
-tango.stft.to(device_type)
-tango.model.to(device_type)
 
-pipe = Tango2Pipeline(vae=tango.vae,
-                      text_encoder=tango.model.text_encoder,
-                      tokenizer=tango.model.tokenizer,
-                      unet=tango.model.unet,
-                      scheduler=tango.scheduler
-                      )
-
-
-@spaces.GPU(duration=60)
-def gradio_generate(prompt, output_format, steps, guidance):
-    # Check whether the prompt contains Korean characters
-    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
-        # Translate the Korean prompt into English
-        translation = translator(prompt)[0]['translation_text']
-        prompt = translation
-        print(f"Translated prompt: {prompt}")
-
-    output_wave = pipe(prompt,steps,guidance)
-    output_wave = output_wave.audios[0]
-    output_filename = "temp.wav"
-    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
 
-    if (output_format == "mp3"):
-        AudioSegment.from_wav("temp.wav").export("temp.mp3", format = "mp3")
-        output_filename = "temp.mp3"
 
-    return output_filename
+
+tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")
+
+
+@spaces.GPU(duration=15)
+def gradio_generate(prompt, steps, guidance,duration=10):
+
+    output = tangoflux.generate(prompt,steps=steps,guidance_scale=guidance,duration=duration)
+    #output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
+
+    #wavio.write(output_filename, output_wave, rate=44100, sampwidth=2)
+    filename = 'temp.wav'
+    #print(f"Saving audio to file: {unique_filename}")
+
+    # Save to file
+    output = output[:,:int(duration*44100)]
+    torchaudio.save(filename, output, 44100)
+
+    # Return the path to the generated audio file
+    return filename
+
+    #if (output_format == "mp3"):
+    #    AudioSegment.from_wav("temp.wav").export("temp.mp3", format = "mp3")
+    #    output_filename = "temp.mp3"
+
+    #return output_filename
 
+# Gradio input and output components
 input_text = gr.Textbox(lines=2, label="Prompt")
-output_format = gr.Radio(label = "Output format", info = "The file you can download", choices = ["mp3", "wav"], value = "wav")
+#output_format = gr.Radio(label = "Output format", info = "The file you can download", choices = ["mp3", "wav"], value = "wav")
 output_audio = gr.Audio(label="Generated Audio", type="filepath")
-denoising_steps = gr.Slider(minimum=
-guidance_scale = gr.Slider(minimum=1, maximum=10, value=
+denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
+guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
+duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration", interactive=True)
 
-css = """
-footer {
-    visibility: hidden;
-}
-"""
 
+# Gradio interface
 gr_interface = gr.Interface(
     fn=gradio_generate,
-    inputs=[input_text, output_format, denoising_steps, guidance_scale],
-    outputs=[output_audio],
-    title="
-
-    css=css,
+    inputs=[input_text, denoising_steps, guidance_scale,duration_scale],
+    outputs=output_audio,
+    title="TangoFlux: Super Fast and Faithful Text to Audio Generation with Flow Matching and Clap-Ranked Preference Optimization",
+    description=description_text,
     allow_flagging=False,
     examples=[
         ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence"],
         ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing"],
         ["Multiple ducks quacking loudly with splashing water and piercing wild animal shriek in background"],
-        ["Powerful ocean waves crashing and receding on sandy beach with distant seagulls"],
-        ["기관총 발사 소음"],
+        ["Powerful ocean waves crashing and receding on sandy beach with distant seagulls"],
         ["Gentle female voice cooing and baby responding with happy gurgles and giggles"],
         ["Clear male voice speaking, sharp popping sound, followed by genuine group laughter"],
         ["Stream of water hitting empty ceramic cup, pitch rising as cup fills up"],
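Note: the removed Tango2Pipeline.inference implements standard classifier-free guidance. The unconditional and conditional embeddings are batched into a single UNet forward pass, then the prediction is split and the two halves are mixed by the guidance scale. A minimal self-contained sketch of that mixing step (the tensor shapes and guidance value below are toy stand-ins, not the Space's actual latents):

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds [unconditional | conditional] predictions; split on the
    # batch dimension and mix exactly as in the removed inference loop above.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Toy usage over latents shaped like the removed prepare_latents output
# (batch, channels, 256, 16), doubled for the two guidance passes.
pred = torch.randn(2, 8, 256, 16)
guided = apply_cfg(pred, guidance_scale=3.0)
assert guided.shape == (1, 8, 256, 16)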
@@ -310,8 +90,76 @@ gr_interface = gr.Interface(
         ["Massive stadium crowd cheering as thunder crashes and lightning strikes"],
         ["Heavy helicopter blades chopping through air with engine and wind noise"],
-        ["Dog barking excitedly and man shouting as race car engine roars past"]
+        ["Dog barking excitedly and man shouting as race car engine roars past"],
+        ["Generate the festive sounds of a fireworks show: explosions lighting up the sky, crowd cheering, and the faint music playing in the background!! Celebration of the new year!"],
+        ["Melodic human whistling harmonizing with natural birdsong"],
+        ["A parade marches through a town square, with drumbeats pounding, children clapping, and a horse neighing amidst the commotion"],
+        ["Quiet speech and then an airplane flying away"],
+        ["A soccer ball hits a goalpost with a metallic clang, followed by cheers, clapping, and the distant hum of a commentator’s voice"],
+        ["A basketball bounces rhythmically on a court, shoes squeak against the floor, and a referee’s whistle cuts through the air"],
+        ["Dripping water echoes sharply, a distant growl reverberates through the cavern, and soft scraping metal suggests something lurking unseen"],
+        ["A cow is mooing whilst a lion is roaring in the background as a hunter shoots. A flock of birds subsequently fly away from the trees."],
+        ["The deep growl of an alligator ripples through the swamp as reeds sway with a soft rustle and a turtle splashes into the murky water"],
+        ["Gentle female voice cooing and baby responding with happy gurgles and giggles"],
+        ["doorbell ding once followed by footsteps gradually getting louder and a door is opened"],
+        ["A fork scrapes a plate, water drips slowly into a sink, and the faint hum of a refrigerator lingers in the background"],
+        ["Powerful ocean waves crashing and receding on sandy beach with distant seagulls"],
+        ["Emulate the lively sounds of a retro arcade: 8-bit game music, coins clinking. People cheering occasionally when players winning"],
+        ["Simulate a forest ambiance with birds chirping and wind rustling through the leaves"],
+        ["A train conductor blows a sharp whistle, metal wheels screech on the rails, and passengers murmur while settling into their seats"],
+        ["Generate an energetic and bustling city street scene with distant traffic and close conversations"],
+        ["Alarms blare with rising urgency as fragments clatter against a metallic hull, interrupted by a faint hiss of escaping air"],
+        ["Create a serene soundscape of a quiet beach at sunset"],
+        ["Tiny pops and hisses of chemical reactions intermingle with the rhythmic pumping of a centrifuge and the soft whirr of air filtration"],
+        ["A train conductor blows a sharp whistle, metal wheels screech on the rails, and passengers murmur while settling into their seats"],
+        ["Emulate the lively sounds of a retro arcade: 8-bit game music, coins clinking. People cheering occasionally when players winning"],
+        ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence"],
+        ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing"],
+        ["Multiple ducks quacking loudly with splashing water and piercing wild animal shriek in background"],
+        ["Create the underwater soundscape: gentle waves, faint whale calls, and the occasional clink of scuba gear"],
+        ["Recreate the sounds of an active volcano: rumbling earth, lava bubbling, and the occasional loud explosive roar of an eruption"],
+        ["A pile of coins spills onto a wooden table with a metallic clatter, followed by the hushed murmur of a tavern crowd and the creak of a swinging door"],
+        ["Clear male voice speaking, sharp popping sound, followed by genuine group laughter"],
+        ["Stream of water hitting empty ceramic cup, pitch rising as cup fills up"],
+        ["Massive crowd erupting in thunderous applause and excited cheering"],
+        ["Deep rolling thunder with bright lightning strikes crackling through sky"],
+        ["Aggressive dog barking and distressed cat meowing as racing car roars past at high speed"],
+        ["Peaceful stream bubbling and birds singing, interrupted by sudden explosive gunshot"],
+        ["Man speaking outdoors, goat bleating loudly, metal gate scraping closed, ducks quacking frantically, wind howling into microphone"],
+        ["Series of loud aggressive dog barks echoing"],
+        ["Multiple distinct cat meows at different pitches"],
+        ["Rhythmic wooden table tapping overlaid with steady water pouring sound"],
+        ["Sustained crowd applause with camera clicks and amplified male announcer voice"],
+        ["Two sharp gunshots followed by panicked birds taking flight with rapid wing flaps"],
+        ["Deep rhythmic snoring with clear breathing patterns"],
+        ["Multiple racing engines revving and accelerating with sharp whistle piercing through"],
+        ["Massive stadium crowd cheering as thunder crashes and lightning strikes"],
+        ["Heavy helicopter blades chopping through air with engine and wind noise"],
+        ["Dog barking excitedly and man shouting as race car engine roars past"],
+        ["A bicycle pedaling on dirt and gravel followed by a man speaking then laughing"],
+        ["Ducks quack and water splashes with some animal screeching in the background"],
+        ["Describe the sound of the ocean"],
+        ["A woman and a baby are having a conversation"],
+        ["A man speaks followed by a popping noise and laughter"],
+        ["A cup is filled from a faucet"],
+        ["An audience cheering and clapping"],
+        ["Rolling thunder with lightning strikes"],
+        ["A dog barking and a cat mewing and a racing car passes by"],
+        ["Gentle water stream, birds chirping and sudden gun shot"],
+        ["A dog barking"],
+        ["A cat meowing"],
+        ["Wooden table tapping sound while water pouring"],
+        ["Applause from a crowd with distant clicking and a man speaking over a loudspeaker"],
+        ["two gunshots followed by birds flying away while chirping"],
+        ["Whistling with birds chirping"],
+        ["A person snoring"],
+        ["Motor vehicles are driving with loud engines and a person whistles"],
+        ["People cheering in a stadium while thunder and lightning strikes"],
+        ["A helicopter is in flight"],
+        ["A dog barking and a man talking and a racing car passes by"],
     ],
     cache_examples="lazy", # Turn on to cache.
 )
 
-
+
+
+gr_interface.queue(15).launch()
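For reference, the generation path after this change reduces to a few calls. A minimal sketch, assuming only what the diff shows: TangoFluxInference(name="declare-lab/TangoFlux"), a generate(prompt, steps=..., guidance_scale=..., duration=...) method returning a channels-first waveform tensor, and the same 44.1 kHz trim-and-save step as gradio_generate; the helper name and its defaults here are illustrative, not part of the Space:

import torchaudio
from TangoFlux import TangoFluxInference

model = TangoFluxInference(name="declare-lab/TangoFlux")

def prompt_to_wav(prompt: str, steps: int = 25, guidance: float = 4.5,
                  duration: int = 10, path: str = "out.wav") -> str:
    # Generate a waveform, trim to the requested duration (44.1 kHz output
    # assumed, matching the Space's torchaudio.save call), and write a WAV.
    audio = model.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)
    audio = audio[:, :int(duration * 44100)]
    torchaudio.save(path, audio, 44100)
    return path

prompt_to_wav("A dog barking")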