sample_script.py
# Import functions and packages
import IPython.display as ipd
import librosa
import matplotlib.pyplot as plt # type: ignore
import numpy as np
import torch
from spleeter.separator import Separator
import zeroNoteSamba.processing.input_rep as IR
from zeroNoteSamba.models.models import Down_CNN
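# NOTE (assumption): this script presumes the zeroNoteSamba repository is on the
# Python path with its pretrained checkpoint under zeroNoteSamba/models/saved/,
# and that spleeter, librosa, torch, numpy, and matplotlib are installed.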

# Run Spleeter and create percussive and non-percussive tracks
separator = Separator("spleeter:4stems")
y, _ = librosa.load("audio_example.mp3", sr=44100, mono=True)
stems = separator.separate(waveform=y.reshape((len(y), 1)))
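# Spleeter returns a dict of stereo (n_samples, 2) arrays, one per stem: average
# the drum channels for the percussive track, and fold the channel-averaged
# "other", "vocals", and "bass" stems into one non-percussive track.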
drums = (stems["drums"][:, 0] + stems["drums"][:, 1]) / 2
other = (
stems["other"][:, 0]
+ stems["other"][:, 1]
+ stems["vocals"][:, 0]
+ stems["vocals"][:, 1]
+ stems["bass"][:, 0]
+ stems["bass"][:, 1]
) / 2
drums = librosa.resample(y=drums, orig_sr=44100, target_sr=16000)
other = librosa.resample(y=other, orig_sr=44100, target_sr=16000)
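# Resample both tracks to 16 kHz; judging by the "_16" suffix on the checkpoint
# loaded below, this appears to be the rate the pretext model was trained at
# (an inference from the file name, not a documented contract).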

# Generate VQTs (variable-Q transforms) for each track
vqt_postve = torch.from_numpy(IR.generate_XQT(drums, 16000, "vqt"))
vqt_anchor = torch.from_numpy(IR.generate_XQT(other, 16000, "vqt"))
vqt_postve = vqt_postve.reshape(1, 1, vqt_postve.shape[0], vqt_postve.shape[1])
vqt_anchor = vqt_anchor.reshape(1, 1, vqt_anchor.shape[0], vqt_anchor.shape[1])
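# Reshape each VQT to (batch, channels, bins, frames), the 4-D layout a PyTorch
# 2-D CNN expects for a single one-channel input.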

# Load pretext task model weights
device = torch.device("cpu")
model = Down_CNN()
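# map_location ensures the checkpoint loads onto the CPU even if it was saved
# from a GPU session; only the pretext sub-network has pretrained weights here.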
state_dict = torch.load("zeroNoteSamba/models/saved/shift_pret_cnn_16.pth", map_location=device)
model.pretext.load_state_dict(state_dict)
model.eval()
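# eval() puts any dropout/batch-norm layers into inference mode before the
# forward passes below.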

# Run the percussive, non-percussive, and combined branches (inference only,
# so no_grad() skips gradient tracking)
with torch.no_grad():
    percussive = model.pretext.postve(vqt_postve.float())
    non_percussive = model.pretext.anchor(vqt_anchor.float())
    combined = model(vqt_anchor.float(), vqt_postve.float())
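# Each branch yields a one-dimensional activation curve over the VQT frames;
# the plots below compare these curves against the raw waveforms.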

# Play each track
ipd.display(ipd.Audio(drums, rate=16000)) # type: ignore
ipd.display(ipd.Audio(other, rate=16000)) # type: ignore
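# Resample the original mix to 16 kHz as well, so all three clips play at the
# same rate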
ipd.display(ipd.Audio(librosa.resample(y=y, orig_sr=44100, target_sr=16000), rate=16000)) # type: ignore

# Plot the two embedding curves overlapped
vqt_len = vqt_postve.shape[3]
plt.plot(non_percussive.detach().numpy().reshape(vqt_len))
plt.plot(percussive.detach().numpy().reshape(vqt_len))
plt.title("Overlapped Embeddings")
plt.legend(["Non-percussive", "Percussive"])
plt.xlabel("Time (frames)")
plt.ylabel("Activation")
plt.grid(True)
plt.ylim((-0.1, 1))
plt.yticks([0.0, 0.25, 0.5, 0.75, 1.0])
plt.show()

# Build time axes in seconds: x1 spans the waveform samples, x2 the embedding
# frames, so signal and embedding share the same horizontal scale
x1 = np.linspace(0, len(drums) / 16000, len(drums))
x2 = np.linspace(0, len(drums) / 16000, vqt_len)
plt.plot(x1, drums)
plt.plot(x2, percussive.detach().numpy().reshape(vqt_len))
plt.title("Overlapped Percussive Signal and Embedding")
plt.legend(["Signal", "Embedding"])
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.grid(True)
plt.ylim((-1, 1))
plt.yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
plt.show()

plt.plot(x1, other)
plt.plot(x2, non_percussive.detach().numpy().reshape(vqt_len))
plt.title("Overlapped Non-percussive Signal and Embedding")
plt.legend(["Signal", "Embedding"])
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.grid(True)
plt.ylim((-1, 1))
plt.yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
plt.show()