Browse Source

Remove unused modules

nagadomi authored 9 years ago · commit e50c7d1478 (parent linked)
5 changed files with 0 additions and 155 deletions
  1. +0 −77 lib/DepthExpand2x.lua
  2. +0 −31 lib/LeakyReLU_deprecated.lua
  3. +0 −19 lib/PSNRCriterion.lua
  4. +0 −25 lib/WeightedMSECriterion.lua
  5. +0 −3 lib/w2nn.lua

+ 0 - 77
lib/DepthExpand2x.lua

@@ -1,77 +0,0 @@
-if w2nn.DepthExpand2x then
-   return w2nn.DepthExpand2x
-end
-local DepthExpand2x, parent = torch.class('w2nn.DepthExpand2x','nn.Module')
- 
-function DepthExpand2x:__init()
-   parent:__init()
-end
-
-function DepthExpand2x:updateOutput(input)
-   local x = input
-   -- (batch_size, depth, height, width)
-   self.shape = x:size()
-
-   assert(self.shape:size() == 4, "input must be 4d tensor")
-   assert(self.shape[2] % 4 == 0, "depth must be depth % 4 = 0")
-   -- (batch_size, width, height, depth)
-   x = x:transpose(2, 4)
-   -- (batch_size, width, height * 2, depth / 2)
-   x = x:reshape(self.shape[1], self.shape[4], self.shape[3] * 2, self.shape[2] / 2)
-   -- (batch_size, height * 2, width, depth / 2)
-   x = x:transpose(2, 3)
-   -- (batch_size, height * 2, width * 2, depth / 4)
-   x = x:reshape(self.shape[1], self.shape[3] * 2, self.shape[4] * 2, self.shape[2] / 4)
-   -- (batch_size, depth / 4, height * 2, width * 2)
-   x = x:transpose(2, 4)
-   x = x:transpose(3, 4)
-   self.output:resizeAs(x):copy(x) -- contiguous
-   
-   return self.output
-end
-
-function DepthExpand2x:updateGradInput(input, gradOutput)
-   -- (batch_size, depth / 4, height * 2, width * 2)
-   local x = gradOutput
-   -- (batch_size, height * 2, width * 2, depth / 4)
-   x = x:transpose(2, 4)
-   x = x:transpose(2, 3)
-   -- (batch_size, height * 2, width, depth / 2)
-   x = x:reshape(self.shape[1], self.shape[3] * 2, self.shape[4], self.shape[2] / 2)
-   -- (batch_size, width, height * 2, depth / 2)
-   x = x:transpose(2, 3)
-   -- (batch_size, width, height, depth)
-   x = x:reshape(self.shape[1], self.shape[4], self.shape[3], self.shape[2])
-   -- (batch_size, depth, height, width)
-   x = x:transpose(2, 4)
-   
-   self.gradInput:resizeAs(x):copy(x)
-   
-   return self.gradInput
-end
-
-function DepthExpand2x.test()
-   require 'image'
-   local function show(x)
-      local img = torch.Tensor(3, x:size(3), x:size(4))
-      img[1]:copy(x[1][1])
-      img[2]:copy(x[1][2])
-      img[3]:copy(x[1][3])
-      image.display(img)
-   end
-   local img = image.lena()
-   local x = torch.Tensor(1, img:size(1) * 4, img:size(2), img:size(3))
-   for i = 0, img:size(1) * 4 - 1 do
-      src_index = ((i % 3) + 1)
-      x[1][i + 1]:copy(img[src_index])
-   end
-   show(x)
-   
-   local de2x = w2nn.DepthExpand2x()
-   out = de2x:forward(x)
-   show(out)
-   out = de2x:updateGradInput(x, out)
-   show(out)
-end
-
-return DepthExpand2x

+ 0 - 31
lib/LeakyReLU_deprecated.lua

@@ -1,31 +0,0 @@
-if nn.LeakyReLU then
-   return nn.LeakyReLU
-end
-
-local LeakyReLU, parent = torch.class('nn.LeakyReLU','nn.Module')
- 
-function LeakyReLU:__init(negative_scale)
-   parent.__init(self)
-   self.negative_scale = negative_scale or 0.333
-   self.negative = torch.Tensor()
-end
- 
-function LeakyReLU:updateOutput(input)
-   self.output:resizeAs(input):copy(input):abs():add(input):div(2)
-   self.negative:resizeAs(input):copy(input):abs():add(-1.0, input):mul(-0.5*self.negative_scale)
-   self.output:add(self.negative)
-   
-   return self.output
-end
- 
-function LeakyReLU:updateGradInput(input, gradOutput)
-   self.gradInput:resizeAs(gradOutput)
-   -- filter positive
-   self.negative:sign():add(1)
-   torch.cmul(self.gradInput, gradOutput, self.negative)
-   -- filter negative
-   self.negative:add(-1):mul(-1 * self.negative_scale):cmul(gradOutput)
-   self.gradInput:add(self.negative)
-   
-   return self.gradInput
-end

+ 0 - 19
lib/PSNRCriterion.lua

@@ -1,19 +0,0 @@
-local PSNRCriterion, parent = torch.class('w2nn.PSNRCriterion','nn.Criterion')
-
-function PSNRCriterion:__init()
-   parent.__init(self)
-   self.image = torch.Tensor()
-   self.diff = torch.Tensor()
-end
-function PSNRCriterion:updateOutput(input, target)
-   self.image:resizeAs(input):copy(input)
-   self.image:clamp(0.0, 1.0)
-   self.diff:resizeAs(self.image):copy(self.image)
-   
-   local mse = math.max(self.diff:add(-1, target):pow(2):mean(), (0.1/255)^2)
-   self.output = 10 * math.log10(1.0 / mse)
-   return self.output
-end
-function PSNRCriterion:updateGradInput(input, target)
-   error("PSNRCriterion does not support backward")
-end

+ 0 - 25
lib/WeightedMSECriterion.lua

@@ -1,25 +0,0 @@
-local WeightedMSECriterion, parent = torch.class('w2nn.WeightedMSECriterion','nn.Criterion')
-
-function WeightedMSECriterion:__init(w)
-   parent.__init(self)
-   self.weight = w:clone()
-   self.diff = torch.Tensor()
-   self.loss = torch.Tensor()
-end
-
-function WeightedMSECriterion:updateOutput(input, target)
-   self.diff:resizeAs(input):copy(input)
-   for i = 1, input:size(1) do
-      self.diff[i]:add(-1, target[i]):cmul(self.weight)
-   end
-   self.loss:resizeAs(self.diff):copy(self.diff):cmul(self.diff)
-   self.output = self.loss:mean()
-   
-   return self.output
-end
-
-function WeightedMSECriterion:updateGradInput(input, target)
-   local norm = 2.0 / input:nElement()
-   self.gradInput:resizeAs(input):copy(self.diff):mul(norm)
-   return self.gradInput
-end

+ 0 - 3
lib/w2nn.lua

@@ -26,9 +26,6 @@ else
       return model
    end
    require 'LeakyReLU'
-   require 'LeakyReLU_deprecated'
-   require 'DepthExpand2x'
-   require 'PSNRCriterion'
    require 'ClippedWeightedHuberCriterion'
    require 'ClippedMSECriterion'
    return w2nn