Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -202,27 +202,43 @@ def resample_waveform(waveform, original_sample_rate, target_sample_rate):
|
|
| 202 |
# segments.append(waveform)
|
| 203 |
|
| 204 |
# return segments
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
def split_audio(waveform, sample_rate, segment_duration=10):
    """Chop a 1-D waveform into consecutive fixed-length segments.

    Args:
        waveform: 1-D torch tensor of audio samples.
        sample_rate: samples per second of ``waveform``.
        segment_duration: length of each segment in seconds (default 10).

    Returns:
        List of tensors, each exactly ``segment_duration * sample_rate``
        samples long. A trailing partial chunk is discarded; if the input
        is shorter than one segment, it is zero-padded on the right and
        returned as a single segment.
    """
    samples_per_segment = segment_duration * sample_rate
    n_samples = waveform.size(0)

    chunks = [
        waveform[offset:offset + samples_per_segment]
        for offset in range(0, n_samples, samples_per_segment)
        if offset + samples_per_segment <= n_samples
    ]

    # Input shorter than a single segment: right-pad with zeros so the
    # caller always receives at least one full-length segment.
    if not chunks:
        shortfall = samples_per_segment - n_samples
        chunks.append(torch.nn.functional.pad(waveform, (0, shortfall)))

    return chunks
|
| 225 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 226 |
# def split_audio(waveform, sample_rate):
|
| 227 |
# segment_samples = segment_duration * sample_rate
|
| 228 |
# total_samples = waveform.size(0)
|
|
|
|
| 202 |
# segments.append(waveform)
|
| 203 |
|
| 204 |
# return segments
|
| 205 |
+
def split_audio(waveform, sample_rate, segment_duration=10):
    """Split a 1-D waveform tensor into fixed-length segments.

    Args:
        waveform: 1-D torch tensor of audio samples.
        sample_rate: samples per second of ``waveform``.
        segment_duration: segment length in seconds. BUG FIX: the diff
            dropped this parameter from the signature while the body
            still referenced it, raising ``NameError`` at call time;
            restored here with its original default of 10, which keeps
            existing two-argument callers working unchanged.

    Returns:
        List of tensors, each ``segment_duration * sample_rate`` samples
        long. Inputs shorter than one segment are zero-padded on the
        right so at least one segment is always returned; a trailing
        partial chunk of a longer input is dropped.
    """
    segment_samples = segment_duration * sample_rate
    total_samples = waveform.size(0)

    # Pad if shorter than one segment so we never return an empty list.
    if total_samples < segment_samples:
        pad_size = segment_samples - total_samples
        waveform = torch.nn.functional.pad(waveform, (0, pad_size))

    segments = []
    for start in range(0, waveform.size(0), segment_samples):
        end = start + segment_samples
        # Keep only full-length segments; drop any trailing remainder.
        if end <= waveform.size(0):
            segments.append(waveform[start:end])

    return segments
|
| 222 |
|
| 223 |
+
# def split_audio(waveform, sample_rate, segment_duration=10):
|
| 224 |
+
# segment_samples = segment_duration * sample_rate
|
| 225 |
+
# total_samples = waveform.size(0)
|
| 226 |
+
|
| 227 |
+
# segments = []
|
| 228 |
+
# for start in range(0, total_samples, segment_samples):
|
| 229 |
+
# end = start + segment_samples
|
| 230 |
+
# if end <= total_samples:
|
| 231 |
+
# segment = waveform[start:end]
|
| 232 |
+
# segments.append(segment)
|
| 233 |
+
|
| 234 |
+
# # If no full segments were created, pad the short waveform
|
| 235 |
+
# if len(segments) == 0:
|
| 236 |
+
# pad_length = segment_samples - total_samples
|
| 237 |
+
# padded_waveform = torch.nn.functional.pad(waveform, (0, pad_length))
|
| 238 |
+
# segments.append(padded_waveform)
|
| 239 |
+
|
| 240 |
+
# return segments
|
| 241 |
+
|
| 242 |
# def split_audio(waveform, sample_rate):
|
| 243 |
# segment_samples = segment_duration * sample_rate
|
| 244 |
# total_samples = waveform.size(0)
|