-- ScaleTable.lua
-- ScaleTable: an nn.Module over a table input {x, s} of two tensors.
-- Forward computes the element-wise product of x with s expanded
-- (broadcast) to x's shape; see updateOutput/updateGradInput below.
local ScaleTable, parent = torch.class("w2nn.ScaleTable", "nn.Module")

function ScaleTable:__init()
   parent.__init(self)
   -- table, because the module consumes a table of two inputs
   self.gradInput = {}
   -- scratch buffer reused across backward passes
   self.grad_tmp = torch.Tensor()
   -- holds the expanded (broadcast) form of the second input
   self.scale = torch.Tensor()
end
  8. function ScaleTable:updateOutput(input)
  9. assert(#input == 2)
  10. assert(input[1]:size(2) == input[2]:size(2))
  11. self.scale:resizeAs(input[1]):expandAs(input[2], input[1])
  12. self.output:resizeAs(self.scale):copy(self.scale)
  13. self.output:cmul(input[1])
  14. return self.output
  15. end
-- Backward pass for input = {x, s}, where forward was output = x .* scale
-- (scale being s broadcast over x):
--   gradInput[1] = gradOutput .* scale                (d output / d x)
--   gradInput[2] = spatial sum of gradOutput .* x     (d output / d s,
--                  accumulating over the broadcast positions)
function ScaleTable:updateGradInput(input, gradOutput)
   -- lazily create the gradient buffer with the same tensor type as x
   self.gradInput[1] = self.gradInput[1] or input[1].new()
   self.gradInput[1]:resizeAs(input[1]):copy(gradOutput)
   self.gradInput[1]:cmul(self.scale)
   -- grad_tmp = gradOutput .* x
   self.grad_tmp:resizeAs(input[1]):copy(gradOutput)
   self.grad_tmp:cmul(input[1])
   self.gradInput[2] = self.gradInput[2] or input[2].new()
   -- Collapse the two trailing spatial dims (x is 4-D: n x c x h x w, per
   -- the size(3)*size(4) reshape) into one, sum it away, then shape the
   -- result like s. NOTE(review): this assumes s has exactly n*c elements
   -- (one per batch/channel pair) -- verify against callers.
   self.gradInput[2]:resizeAs(input[2]):sum(self.grad_tmp:reshape(self.grad_tmp:size(1), self.grad_tmp:size(2), self.grad_tmp:size(3) * self.grad_tmp:size(4)), 3):resizeAs(input[2])
   -- drop stale gradient entries left over from any earlier, larger input
   for i=#input+1, #self.gradInput do
      self.gradInput[i] = nil
   end
   return self.gradInput
end
  29. function ScaleTable:clearState()
  30. nn.utils.clear(self, {'grad_tmp','scale'})
  31. return parent:clearState()
  32. end