Commit

update slicer

innnky committed Dec 31, 2022
1 parent 882ad4b commit c004402

Showing 2 changed files with 98 additions and 123 deletions.
210 changes: 97 additions & 113 deletions inference/slicer.py
@@ -1,143 +1,128 @@
-import time
-
-import numpy as np
+import librosa
 import torch
 import torchaudio
-from scipy.ndimage import maximum_filter1d, uniform_filter1d
-
-
-def timeit(func):
-    def run(*args, **kwargs):
-        t = time.time()
-        res = func(*args, **kwargs)
-        print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t))
-        return res
-
-    return run
-
-
-# @timeit
-def _window_maximum(arr, win_sz):
-    return maximum_filter1d(arr, size=win_sz)[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
-
-
-# @timeit
-def _window_rms(arr, win_sz):
-    filtered = np.sqrt(uniform_filter1d(np.power(arr, 2), win_sz) - np.power(uniform_filter1d(arr, win_sz), 2))
-    return filtered[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
-
-
-def level2db(levels, eps=1e-12):
-    return 20 * np.log10(np.clip(levels, a_min=eps, a_max=1))
-
-
-def _apply_slice(audio, begin, end):
-    if len(audio.shape) > 1:
-        return audio[:, begin: end]
-    else:
-        return audio[begin: end]
 
 
 class Slicer:
     def __init__(self,
                  sr: int,
-                 db_threshold: float = -40,
+                 threshold: float = -40.,
                  min_length: int = 5000,
-                 win_l: int = 300,
-                 win_s: int = 20,
-                 max_silence_kept: int = 500):
-        self.db_threshold = db_threshold
-        self.min_samples = round(sr * min_length / 1000)
-        self.win_ln = round(sr * win_l / 1000)
-        self.win_sn = round(sr * win_s / 1000)
-        self.max_silence = round(sr * max_silence_kept / 1000)
-        if not self.min_samples >= self.win_ln >= self.win_sn:
-            raise ValueError('The following condition must be satisfied: min_length >= win_l >= win_s')
-        if not self.max_silence >= self.win_sn:
-            raise ValueError('The following condition must be satisfied: max_silence_kept >= win_s')
+                 min_interval: int = 300,
+                 hop_size: int = 20,
+                 max_sil_kept: int = 5000):
+        if not min_length >= min_interval >= hop_size:
+            raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
+        if not max_sil_kept >= hop_size:
+            raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
+        min_interval = sr * min_interval / 1000
+        self.threshold = 10 ** (threshold / 20.)
+        self.hop_size = round(sr * hop_size / 1000)
+        self.win_size = min(round(min_interval), 4 * self.hop_size)
+        self.min_length = round(sr * min_length / 1000 / self.hop_size)
+        self.min_interval = round(min_interval / self.hop_size)
+        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
 
+    def _apply_slice(self, waveform, begin, end):
+        if len(waveform.shape) > 1:
+            return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
+        else:
+            return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
+
-    @timeit
-    def slice(self, audio):
-        samples = audio
-        if samples.shape[0] <= self.min_samples:
-            return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
-        # get absolute amplitudes
-        abs_amp = np.abs(samples - np.mean(samples))
-        # calculate local maximum with large window
-        win_max_db = level2db(_window_maximum(abs_amp, win_sz=self.win_ln))
+    # @timeit
+    def slice(self, waveform):
+        if len(waveform.shape) > 1:
+            samples = librosa.to_mono(waveform)
+        else:
+            samples = waveform
+        if samples.shape[0] <= self.min_length:
+            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
+        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
         sil_tags = []
-        left = right = 0
-        while right < win_max_db.shape[0]:
-            if win_max_db[right] < self.db_threshold:
-                right += 1
-            elif left == right:
-                left += 1
-                right += 1
-            else:
-                if left == 0:
-                    split_loc_l = left
+        silence_start = None
+        clip_start = 0
+        for i, rms in enumerate(rms_list):
+            # Keep looping while frame is silent.
+            if rms < self.threshold:
+                # Record start of silent frames.
+                if silence_start is None:
+                    silence_start = i
+                continue
+            # Keep looping while frame is not silent and silence start has not been recorded.
+            if silence_start is None:
+                continue
+            # Clear recorded silence start if interval is not enough or clip is too short
+            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
+            need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
+            if not is_leading_silence and not need_slice_middle:
+                silence_start = None
+                continue
+            # Need slicing. Record the range of silent frames to be removed.
+            if i - silence_start <= self.max_sil_kept:
+                pos = rms_list[silence_start: i + 1].argmin() + silence_start
+                if silence_start == 0:
+                    sil_tags.append((0, pos))
                 else:
-                    sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
-                    rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
-                    split_win_l = left + np.argmin(rms_db_left)
-                    split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
-                if len(sil_tags) != 0 and split_loc_l - sil_tags[-1][1] < self.min_samples and right < win_max_db.shape[
-                    0] - 1:
-                    right += 1
-                    left = right
-                    continue
-                if right == win_max_db.shape[0] - 1:
-                    split_loc_r = right + self.win_ln
+                    sil_tags.append((pos, pos))
+                clip_start = pos
+            elif i - silence_start <= self.max_sil_kept * 2:
+                pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
+                pos += i - self.max_sil_kept
+                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
+                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
+                if silence_start == 0:
+                    sil_tags.append((0, pos_r))
+                    clip_start = pos_r
                 else:
-                    sil_right_n = min(self.max_silence, (right + self.win_ln - left) // 2)
-                    rms_db_right = level2db(_window_rms(samples[right + self.win_ln - sil_right_n: right + self.win_ln],
-                                                        win_sz=self.win_sn))
-                    split_win_r = right + self.win_ln - sil_right_n + np.argmin(rms_db_right)
-                    split_loc_r = split_win_r + np.argmin(abs_amp[split_win_r: split_win_r + self.win_sn])
-                sil_tags.append((split_loc_l, split_loc_r))
-                right += 1
-                left = right
-        if left != right:
-            sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
-            rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
-            split_win_l = left + np.argmin(rms_db_left)
-            split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
-            sil_tags.append((split_loc_l, samples.shape[0]))
+                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
+                    clip_start = max(pos_r, pos)
+            else:
+                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
+                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
+                if silence_start == 0:
+                    sil_tags.append((0, pos_r))
+                else:
+                    sil_tags.append((pos_l, pos_r))
+                clip_start = pos_r
+            silence_start = None
+        # Deal with trailing silence.
+        total_frames = rms_list.shape[0]
+        if silence_start is not None and total_frames - silence_start >= self.min_interval:
+            silence_end = min(total_frames, silence_start + self.max_sil_kept)
+            pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
+            sil_tags.append((pos, total_frames + 1))
+        # Apply and return slices.
         if len(sil_tags) == 0:
-            return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
+            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
         else:
             chunks = []
             # The first silence does not start at the very beginning; add the leading voiced segment.
             if sil_tags[0][0]:
-                chunks.append({"slice": False, "split_time": f"0,{sil_tags[0][0]}"})
+                chunks.append(
+                    {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
             for i in range(0, len(sil_tags)):
                 # Mark the voiced segments (skipping the first one).
                 if i:
-                    chunks.append({"slice": False, "split_time": f"{sil_tags[i - 1][1]},{sil_tags[i][0]}"})
+                    chunks.append({"slice": False,
+                                   "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
                 # Mark all silent segments.
-                chunks.append({"slice": True, "split_time": f"{sil_tags[i][0]},{sil_tags[i][1]}"})
+                chunks.append({"slice": True,
+                               "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
             # The last silence does not extend to the end; add the trailing segment.
-            if sil_tags[-1][1] != len(audio):
-                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1]},{len(audio)}"})
+            if sil_tags[-1][1] * self.hop_size < len(waveform):
+                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
             chunk_dict = {}
             for i in range(len(chunks)):
                 chunk_dict[str(i)] = chunks[i]
             return chunk_dict
 
 
-def cut(audio_path, db_thresh=-30, min_len=5000, win_l=300, win_s=20, max_sil_kept=500):
-    audio, sr = torchaudio.load(audio_path)
-    if len(audio.shape) == 2 and audio.shape[1] >= 2:
-        audio = torch.mean(audio, dim=0).unsqueeze(0)
-    audio = audio.cpu().numpy()[0]
+def cut(audio_path, db_thresh=-30, min_len=5000):
+    audio, sr = librosa.load(audio_path, sr=None)
     slicer = Slicer(
         sr=sr,
-        db_threshold=db_thresh,
-        min_length=min_len,
-        win_l=win_l,
-        win_s=win_s,
-        max_silence_kept=max_sil_kept
+        threshold=db_thresh,
+        min_length=min_len
     )
     chunks = slicer.slice(audio)
     return chunks
@@ -152,7 +137,6 @@ def chunks2audio(audio_path, chunks):
     result = []
     for k, v in chunks.items():
         tag = v["split_time"].split(",")
-        result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
         if tag[0] != tag[1]:
             result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
     return result, sr
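
The new Slicer works on a grid of RMS frames rather than raw samples: the constructor converts its millisecond and dB parameters into frame units once, and silence tags found on that grid are later multiplied by hop_size to recover sample positions, which is why the new split_time strings carry the "* self.hop_size" factor. As a standalone sketch of those conversions (the 44100 Hz sample rate is an assumption, not something this commit fixes):

# Sketch of the unit conversions in Slicer.__init__; sr = 44100 is an assumed example.
sr = 44100
threshold_db = -40.                         # default `threshold` in the diff
hop_size_ms, min_interval_ms = 20, 300      # default `hop_size` / `min_interval`
min_length_ms, max_sil_kept_ms = 5000, 5000

threshold = 10 ** (threshold_db / 20.)                             # -40 dB -> 0.01 linear RMS
hop_size = round(sr * hop_size_ms / 1000)                          # 882 samples per frame
win_size = min(round(sr * min_interval_ms / 1000), 4 * hop_size)   # 3528 samples
min_length = round(sr * min_length_ms / 1000 / hop_size)           # 250 frames
min_interval = round(sr * min_interval_ms / 1000 / hop_size)       # 15 frames
max_sil_kept = round(sr * max_sil_kept_ms / 1000 / hop_size)       # 250 frames
print(threshold, hop_size, win_size, min_length, min_interval, max_sil_kept)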


11 changes: 1 addition & 10 deletions inference_main.py
@@ -33,16 +33,7 @@
         raw_audio_path += ".wav"
     infer_tool.format_wav(raw_audio_path)
     wav_path = Path(raw_audio_path).with_suffix('.wav')
-    audio, sr = librosa.load(wav_path, mono=True, sr=None)
-    wav_hash = infer_tool.get_md5(audio)
-    if wav_hash in chunks_dict.keys():
-        print("load chunks from temp")
-        chunks = chunks_dict[wav_hash]["chunks"]
-    else:
-        chunks = slicer.cut(wav_path, db_thresh=slice_db)
-        print(chunks)
-        chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())}
-        infer_tool.write_temp("inference/chunks_temp.json", chunks_dict)
+    chunks = slicer.cut(wav_path, db_thresh=slice_db)
     audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
 
     for spk in spk_list:
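
For context, a minimal sketch of the simplified call path after this commit; the input path is a hypothetical placeholder, while cut and chunks2audio are the helpers shown in the slicer.py diff above:

# Hypothetical end-to-end use of the updated slicer; "raw/example.wav" is a placeholder.
from inference import slicer

wav_path = "raw/example.wav"
chunks = slicer.cut(wav_path, db_thresh=-40, min_len=5000)
audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
for is_silence, segment in audio_data:
    # The "slice" flag is True for silent chunks and False for voiced ones.
    print(is_silence, len(segment))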
