class AttentionLayer(nn.Module):
    def __init__(self, in_channels, out_channels, key_channels):
-        super(AttentionLayer, self).__init__()
+        super().__init__()
        # 1x1 convolutions project the input into query, key, and value spaces.
        self.conv_Q = nn.Conv1d(in_channels, key_channels, kernel_size=1, bias=False)
        self.conv_K = nn.Conv1d(in_channels, key_channels, kernel_size=1, bias=False)
        self.conv_V = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
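
    def forward(self, x):
        # Hedged sketch, not part of the original diff: a typical forward pass
        # for this Q/K/V layout is scaled dot-product attention over the
        # sequence dimension. Assumes `import torch` at module level and an
        # input x of shape (batch, in_channels, length).
        Q = self.conv_Q(x)  # (batch, key_channels, length)
        K = self.conv_K(x)  # (batch, key_channels, length)
        V = self.conv_V(x)  # (batch, out_channels, length)
        # Similarity of every position with every other, scaled by sqrt(d_k).
        A = torch.softmax(torch.bmm(Q.transpose(1, 2), K) / K.size(1) ** 0.5, dim=-1)
        # Weighted sum of values; output shape (batch, out_channels, length).
        return torch.bmm(V, A.transpose(1, 2))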