Spaces: Running
from diffusers import AudioLDM2Pipeline
import torch
import gradio as gr

# Checkpoint for the text-to-audio model.
model = "cvssp/audioldm2"

# Fix: the original called `.to()` with no device (a no-op) while loading the
# weights in float16 — half precision is unsupported or extremely slow on CPU.
# Choose the device explicitly and match the dtype to it.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = AudioLDM2Pipeline.from_pretrained(model, torch_dtype=dtype).to(device)

# Default example prompt for interactive experimentation.
prompt = "A cheerful ukulele strumming in a beachside jam."

# Fixed-seed generator shared across calls so the demo output is repeatable
# from a fresh process start. NOTE(review): the generator state advances on
# every call, so repeated generations within one session still differ.
generator = torch.Generator().manual_seed(0)
def music_gen(prompt, duration=5, negative_prompt="Low quality"):
    """Generate an audio clip from a text prompt with AudioLDM2.

    Args:
        prompt: Text description of the desired sound/music.
        duration: Target clip length in seconds. NOTE(review): AudioLDM2
            appears to round this to its internal latent granularity
            (~2.56 s steps) — confirm against the pipeline docs.
        negative_prompt: Qualities to steer generation away from.
            Generalized from the previously hard-coded "Low quality";
            the default preserves the original behavior.

    Returns:
        A ``(sample_rate, waveform)`` tuple suitable for a gradio ``audio``
        output. AudioLDM2 produces audio at 16 kHz, hence the fixed 16000.
    """
    audio = pipe(
        prompt,
        negative_prompt=negative_prompt,
        audio_length_in_s=duration,
        generator=generator,  # module-level fixed-seed generator
    ).audios[0]
    return 16000, audio
# Build and launch the demo UI.
# Fix: each `examples` row must supply one value per input component
# (prompt, duration) — the original rows listed only the prompt, which
# gradio rejects for a two-input interface.
interface = gr.Interface(
    fn=music_gen,
    inputs=[
        gr.Textbox(),
        gr.Slider(1, 120, value=5, step=1, label="Duration"),
    ],
    outputs="audio",
    examples=[
        ["A cheerful ukulele strumming in a beachside jam.", 5],
        ["A violin playing a heartfelt melody.", 5],
    ],
).launch(debug=True, share=True)