losses.py
import tensorflow as tf
from tensorflow.keras import layers as tfkl
from dsp_utils import spectral_ops
from pretrained_crepe import PretrainedCREPE

class SpectralLoss(tfkl.Layer):
  """Multi-scale spectral loss between synthesized audio and target audio."""

  def __init__(self,
               fft_sizes=(2048, 1024, 512, 256, 128, 64),
               logmag_weight=1.0,
               name='spectral_loss'):
    super().__init__(name=name)
    self.fft_sizes = fft_sizes
    self.logmag_weight = logmag_weight

  def call(self, inputs):
    audio, target_audio = inputs["audio"], inputs["target_audio"]
    # Sum the per-resolution losses over all FFT sizes.
    total_loss = 0
    for size in self.fft_sizes:
      total_loss += self.calculate_loss_for_fft_size(audio, target_audio, size)
    return {"spec_loss": total_loss}

  def calculate_loss_for_fft_size(self, audio, target_audio, size):
    # Magnitude spectrograms of the target audio and the synthesized version.
    mag_audio = spectral_ops.compute_mag(audio, size=size)
    mag_target = spectral_ops.compute_mag(target_audio, size=size)
    log_mag_audio = spectral_ops.safe_log(mag_audio)
    log_mag_target_audio = spectral_ops.safe_log(mag_target)
    # L1 distance on both the linear and the log magnitudes.
    mag_loss = tf.math.reduce_mean(tf.math.abs(mag_audio - mag_target))
    log_mag_loss = tf.math.reduce_mean(tf.math.abs(log_mag_audio - log_mag_target_audio))
    return mag_loss + self.logmag_weight * log_mag_loss

class PerceptualLoss(tfkl.Layer):
  """Embedding (perceptual) loss computed with a pretrained CREPE model."""

  def __init__(self,
               weight=38.0,
               model_capacity='tiny',
               name='pretrained_crepe_embedding_loss',
               activation_layer='conv5-BN'):
    super().__init__(name=name)
    self.weight = weight
    self.pretrained_model = PretrainedCREPE(model_capacity=model_capacity,
                                            activation_layer=activation_layer)

  def call(self, inputs):
    audio, target_audio = inputs["audio"], inputs["target_audio"]
    audio, target_audio = tf_float32(audio), tf_float32(target_audio)
    # L1 distance between CREPE activations of the target and synthesized audio.
    target_emb = self.pretrained_model(target_audio)
    synth_emb = self.pretrained_model(audio)
    loss = self.weight * tf.reduce_mean(tf.abs(target_emb - synth_emb))
    return {"perc_loss": loss}

# Used in the unsupervised setting.
class MultiLoss(tfkl.Layer):
  """Combines the multi-scale spectral loss and the CREPE perceptual loss."""

  def __init__(self, logmag_weight=1.0, perceptual_loss_weight=5e-5, name="multi_loss"):
    super().__init__(name=name)
    self.spec_loss_fn = SpectralLoss(logmag_weight=logmag_weight)
    self.perceptual_loss_fn = PerceptualLoss(weight=perceptual_loss_weight)

  def call(self, inputs):
    spec_loss = self.spec_loss_fn(inputs)['spec_loss']
    perc_loss = self.perceptual_loss_fn(inputs)['perc_loss']
    total_loss = spec_loss + perc_loss
    return {"total_loss": total_loss, "spec_loss": spec_loss, "perc_loss": perc_loss}

def tf_float32(x):
  """Casts a tensor (or array-like) to float32."""
  return tf.cast(x, dtype=tf.float32)
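

# --- Usage sketch (not part of the original file) ---
# A minimal example of how MultiLoss might be exercised, assuming `audio` and
# `target_audio` are mono float waveforms of shape [batch, n_samples] at the
# sample rate expected by spectral_ops and the pretrained CREPE model
# (CREPE conventionally operates on 16 kHz audio). Shapes and values below
# are illustrative only.
if __name__ == "__main__":
  import numpy as np

  batch = {
      "audio": np.random.uniform(-1.0, 1.0, (2, 64000)).astype("float32"),
      "target_audio": np.random.uniform(-1.0, 1.0, (2, 64000)).astype("float32"),
  }
  losses = MultiLoss()(batch)
  print({name: float(value) for name, value in losses.items()})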