# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import numpy as np
import paddle

from .comfunc import rerange_index

class EmlLoss(paddle.nn.Layer):
    """EML metric-learning loss.

    Penalizes positive pairs that end up farther apart (in squared Euclidean
    distance) than negative pairs, via a numerically stable smoothed
    surrogate. Expects `samples_each_class` samples per class in each batch,
    in the layout that `rerange_index` assumes.
    """

    def __init__(self, batch_size=40, samples_each_class=2):
        super(EmlLoss, self).__init__()
        assert batch_size % samples_each_class == 0
        self.samples_each_class = samples_each_class
        self.batch_size = batch_size
        # Precomputed permutation that reorders the flattened distance matrix
        # so that each row reads [self | positives | negatives].
        self.rerange_index = rerange_index(batch_size, samples_each_class)
        self.thresh = 20.0  # switch to the log-space approximation above this gap
        self.beta = 100000

    def surrogate_function(self, beta, theta, bias):
        # Exact surrogate: log(1 + beta * theta * exp(bias)) / log(1 + beta).
        x = theta * paddle.exp(bias)
        output = paddle.log(1 + beta * x) / math.log(1 + beta)
        return output

    def surrogate_function_approximate(self, beta, theta, bias):
        # For large bias, log(1 + beta * theta * exp(bias)) is approximately
        # log(theta) + bias + log(beta); this avoids overflowing exp(bias).
        output = (
            paddle.log(theta) + bias + math.log(beta)) / math.log(1 + beta)
        return output

    def surrogate_function_stable(self, beta, theta, target, thresh):
        # Split `target` at `thresh`: the exact surrogate is evaluated only
        # on the capped gap, the approximation only on the excess, and the
        # surrogate value at `thresh` is subtracted so the two branches join
        # continuously.
        max_gap = paddle.to_tensor(thresh, dtype='float32')
        max_gap.stop_gradient = True

        target_max = paddle.maximum(target, max_gap)
        target_min = paddle.minimum(target, max_gap)

        loss1 = self.surrogate_function(beta, theta, target_min)
        loss2 = self.surrogate_function_approximate(beta, theta, target_max)
        bias = self.surrogate_function(beta, theta, max_gap)
        loss = loss1 + loss2 - bias
        return loss
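
    # Worked check of the stabilization (illustrative numbers, not from the
    # original source): with beta = 1e5 and theta = 1, a raw gap of 50 would
    # require exp(50) ~ 5.2e21 inside surrogate_function; here the exact
    # surrogate only ever sees bias <= thresh = 20, i.e. at most
    # beta * exp(20) ~ 4.9e13, while the excess 50 - 20 is handled linearly
    # in log space by surrogate_function_approximate.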

    def forward(self, input, target=None):
        features = input["features"]
        samples_each_class = self.samples_each_class
        batch_size = self.batch_size
        rerange_index = self.rerange_index

        # Pairwise squared Euclidean distances, shape [batch_size, batch_size].
        diffs = paddle.unsqueeze(features, axis=1) - paddle.unsqueeze(
            features, axis=0)
        similarity_matrix = paddle.sum(paddle.square(diffs), axis=-1)

        # Reorder each row to [self | positives | negatives], then split.
        tmp = paddle.reshape(similarity_matrix, shape=[-1, 1])
        rerange_index = paddle.to_tensor(rerange_index)
        tmp = paddle.gather(tmp, index=rerange_index)
        similarity_matrix = paddle.reshape(tmp, shape=[-1, batch_size])
        ignore, pos, neg = paddle.split(
            similarity_matrix,
            num_or_sections=[
                1, samples_each_class - 1, batch_size - samples_each_class
            ],
            axis=1)
        ignore.stop_gradient = True

        # Log-sum-exp style factorization: with bias = pos_max - neg_min and
        # theta = mean(exp(pos - pos_max)) * mean(exp(neg_min - neg)),
        # theta * exp(bias) equals the mean of exp(pos_i - neg_j) over all
        # positive/negative pairs, computed without overflow.
        pos_max = paddle.max(pos, axis=1, keepdim=True)
        pos = paddle.exp(pos - pos_max)
        pos_mean = paddle.mean(pos, axis=1, keepdim=True)

        neg_min = paddle.min(neg, axis=1, keepdim=True)
        neg = paddle.exp(neg_min - neg)
        neg_mean = paddle.mean(neg, axis=1, keepdim=True)

        bias = pos_max - neg_min
        theta = paddle.multiply(neg_mean, pos_mean)

        loss = self.surrogate_function_stable(self.beta, theta, bias,
                                              self.thresh)
        loss = paddle.mean(loss)
        return {"emlloss": loss}
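

# --- Usage sketch (illustrative; not part of the original file) ---
# A minimal example of driving EmlLoss, assuming a batch sampler that yields
# `samples_each_class` consecutive embeddings per identity (the layout
# `rerange_index` expects); the feature dimension of 128 is arbitrary. Run it
# from the package context, since this module uses a relative import.
if __name__ == "__main__":
    paddle.seed(0)
    loss_fn = EmlLoss(batch_size=40, samples_each_class=2)
    feats = paddle.randn([40, 128])  # e.g. backbone + embedding-head output
    out = loss_fn({"features": feats})
    print(out["emlloss"])  # scalar loss tensor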