reconstruct.lua

require 'image'
local iproc = require 'iproc'
local srcnn = require 'srcnn'
-- Run the model over the image block by block and stitch the results into
-- an output that is inner_scale times larger.
local function reconstruct_nn(model, x, inner_scale, offset, block_size, batch_size)
   batch_size = batch_size or 1
   if x:dim() == 2 then
      x = x:reshape(1, x:size(1), x:size(2))
   end
   local ch = x:size(1)
   local new_x = torch.Tensor(x:size(1), x:size(2) * inner_scale, x:size(3) * inner_scale):zero()
   local input_block_size = block_size / inner_scale
   local output_block_size = block_size
   local output_size = output_block_size - offset * 2
   local output_size_in_input = input_block_size - math.ceil(offset / inner_scale) * 2
   local input_indexes = {}
   local output_indexes = {}
   -- collect the input/output crop regions for every block
   for i = 1, x:size(2), output_size_in_input do
      for j = 1, x:size(3), output_size_in_input do
         if i + input_block_size - 1 <= x:size(2) and j + input_block_size - 1 <= x:size(3) then
            local index = {{},
                           {i, i + input_block_size - 1},
                           {j, j + input_block_size - 1}}
            local ii = (i - 1) * inner_scale + 1
            local jj = (j - 1) * inner_scale + 1
            local output_index = {{}, {ii, ii + output_size - 1},
                                  {jj, jj + output_size - 1}}
            table.insert(input_indexes, index)
            table.insert(output_indexes, output_index)
         end
      end
   end
   local input = torch.Tensor(batch_size, ch, input_block_size, input_block_size)
   local input_cuda = torch.CudaTensor(batch_size, ch, input_block_size, input_block_size)
   -- process the blocks in mini-batches
   for i = 1, #input_indexes, batch_size do
      local c = 0
      local output
      for j = 0, batch_size - 1 do
         if i + j > #input_indexes then
            break
         end
         input[j+1]:copy(x[input_indexes[i + j]])
         c = c + 1
      end
      input_cuda:copy(input)
      if c == batch_size then
         output = model:forward(input_cuda)
      else
         -- the last mini-batch may be smaller than batch_size
         output = model:forward(input_cuda:narrow(1, 1, c))
      end
      --output = output:view(batch_size, ch, output_size, output_size)
      for j = 0, c - 1 do
         new_x[output_indexes[i + j]]:copy(output[j+1])
      end
   end
   return new_x
end
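-- Block geometry, as a sketch with assumed values (block_size, inner_scale and
-- offset below are illustrative, not taken from this file):
--   with block_size = 128, inner_scale = 2 and offset = 8,
--     input_block_size     = 128 / 2              = 64
--     output_size          = 128 - 8 * 2          = 112
--     output_size_in_input = 64 - ceil(8 / 2) * 2 = 56
--   so 64x64 input blocks are read with a stride of 56 pixels and each yields a
--   112x112 output patch; the overlapping margins absorb the convolution border
--   (offset) that the model cannot reconstruct.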
local reconstruct = {}
function reconstruct.is_rgb(model)
   if srcnn.channels(model) == 3 then
      -- 3ch RGB
      return true
   else
      -- 1ch Y
      return false
   end
end
function reconstruct.offset_size(model)
   return srcnn.offset_size(model)
end
function reconstruct.has_resize(model)
   return srcnn.scale_factor(model) > 1
end
function reconstruct.inner_scale(model)
   return srcnn.scale_factor(model)
end
-- Compute the padding needed so that the image tiles into an integer number
-- of processing blocks.
local function padding_params(x, model, block_size)
   local p = {}
   local offset = reconstruct.offset_size(model)
   p.x_w = x:size(3)
   p.x_h = x:size(2)
   p.inner_scale = reconstruct.inner_scale(model)
   local input_offset = math.ceil(offset / p.inner_scale)
   local input_block_size = block_size / p.inner_scale
   local process_size = input_block_size - input_offset * 2
   local h_blocks = math.floor(p.x_h / process_size) +
      ((p.x_h % process_size == 0 and 0) or 1)
   local w_blocks = math.floor(p.x_w / process_size) +
      ((p.x_w % process_size == 0 and 0) or 1)
   local h = (h_blocks * process_size) + input_offset * 2
   local w = (w_blocks * process_size) + input_offset * 2
   p.pad_h1 = input_offset
   p.pad_w1 = input_offset
   p.pad_h2 = (h - input_offset) - p.x_h
   p.pad_w2 = (w - input_offset) - p.x_w
   return p
end
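-- Padding arithmetic, as a sketch with assumed values (image size, block_size
-- and offset below are illustrative, not taken from this file):
--   for a 200x200 input, block_size = 128, inner_scale = 1 and offset = 8,
--     process_size = 128 - 8 * 2 = 112
--     h_blocks = w_blocks = floor(200 / 112) + 1 = 2
--     h = w = 2 * 112 + 8 * 2 = 240
--     pad_h1 = pad_w1 = 8, pad_h2 = pad_w2 = (240 - 8) - 200 = 32
--   i.e. 8 pixels are added before and 32 after each dimension, so the padded
--   240x240 image splits exactly into 2x2 blocks of 128x128, each producing a
--   112x112 region of valid output.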
-- Reconstruct the Y (luminance) channel only and keep the original chroma.
function reconstruct.image_y(model, x, offset, block_size, batch_size)
   block_size = block_size or 128
   local p = padding_params(x, model, block_size)
   x = image.rgb2yuv(iproc.padding(x, p.pad_w1, p.pad_w2, p.pad_h1, p.pad_h2))
   local y = reconstruct_nn(model, x[1], p.inner_scale, offset, block_size, batch_size)
   -- crop the padding back off (crop takes x1, y1, x2, y2)
   x = iproc.crop(x, p.pad_w1, p.pad_h1, p.pad_w1 + p.x_w, p.pad_h1 + p.x_h)
   y = iproc.crop(y, 0, 0, p.x_w, p.x_h)
   -- clamp to [0, 1]
   y[torch.lt(y, 0)] = 0
   y[torch.gt(y, 1)] = 1
   x[1]:copy(y)
   local output = image.yuv2rgb(x)
   output[torch.lt(output, 0)] = 0
   output[torch.gt(output, 1)] = 1
   x = nil
   y = nil
   collectgarbage()
   return output
end
-- Upscale the Y channel with the model; chroma comes from a Lanczos-upscaled copy.
function reconstruct.scale_y(model, scale, x, offset, block_size, batch_size, upsampling_filter)
   upsampling_filter = upsampling_filter or "Box"
   block_size = block_size or 128
   local x_lanczos
   if reconstruct.has_resize(model) then
      -- the model upscales internally; only the chroma reference needs resizing
      x_lanczos = iproc.scale(x, x:size(3) * scale, x:size(2) * scale, "Lanczos")
   else
      x_lanczos = iproc.scale(x, x:size(3) * scale, x:size(2) * scale, "Lanczos")
      x = iproc.scale(x, x:size(3) * scale, x:size(2) * scale, upsampling_filter)
   end
   local p = padding_params(x, model, block_size)
   if p.x_w * p.x_h > 2048*2048 then
      collectgarbage()
   end
   x = image.rgb2yuv(iproc.padding(x, p.pad_w1, p.pad_w2, p.pad_h1, p.pad_h2))
   x_lanczos = image.rgb2yuv(x_lanczos)
   local y = reconstruct_nn(model, x[1], p.inner_scale, offset, block_size, batch_size)
   y = iproc.crop(y, 0, 0, p.x_w * p.inner_scale, p.x_h * p.inner_scale)
   y[torch.lt(y, 0)] = 0
   y[torch.gt(y, 1)] = 1
   x_lanczos[1]:copy(y)
   local output = image.yuv2rgb(x_lanczos)
   output[torch.lt(output, 0)] = 0
   output[torch.gt(output, 1)] = 1
   x = nil
   x_lanczos = nil
   y = nil
   collectgarbage()
   return output
end
-- Reconstruct all three RGB channels at once.
function reconstruct.image_rgb(model, x, offset, block_size, batch_size)
   block_size = block_size or 128
   local p = padding_params(x, model, block_size)
   x = iproc.padding(x, p.pad_w1, p.pad_w2, p.pad_h1, p.pad_h2)
   if p.x_w * p.x_h > 2048*2048 then
      collectgarbage()
   end
   local y = reconstruct_nn(model, x, p.inner_scale, offset, block_size, batch_size)
   local output = iproc.crop(y, 0, 0, p.x_w, p.x_h)
   output[torch.lt(output, 0)] = 0
   output[torch.gt(output, 1)] = 1
   x = nil
   y = nil
   collectgarbage()
   return output
end
-- Upscale all three RGB channels with the model.
function reconstruct.scale_rgb(model, scale, x, offset, block_size, batch_size, upsampling_filter)
   upsampling_filter = upsampling_filter or "Box"
   block_size = block_size or 128
   if not reconstruct.has_resize(model) then
      -- the model has no built-in upscaling, so resize the input first
      x = iproc.scale(x, x:size(3) * scale, x:size(2) * scale, upsampling_filter)
   end
   local p = padding_params(x, model, block_size)
   x = iproc.padding(x, p.pad_w1, p.pad_w2, p.pad_h1, p.pad_h2)
   if p.x_w * p.x_h > 2048*2048 then
      collectgarbage()
   end
   local y = reconstruct_nn(model, x, p.inner_scale, offset, block_size, batch_size)
   local output = iproc.crop(y, 0, 0, p.x_w * p.inner_scale, p.x_h * p.inner_scale)
   output[torch.lt(output, 0)] = 0
   output[torch.gt(output, 1)] = 1
   x = nil
   y = nil
   collectgarbage()
   return output
end
-- Top-level reconstruction entry point; grayscale input is temporarily
-- expanded to 3 channels and converted back afterwards.
function reconstruct.image(model, x, block_size)
   local i2rgb = false
   if x:size(1) == 1 then
      local new_x = torch.Tensor(3, x:size(2), x:size(3))
      new_x[1]:copy(x)
      new_x[2]:copy(x)
      new_x[3]:copy(x)
      x = new_x
      i2rgb = true
   end
   if reconstruct.is_rgb(model) then
      x = reconstruct.image_rgb(model, x,
                                reconstruct.offset_size(model), block_size)
   else
      x = reconstruct.image_y(model, x,
                              reconstruct.offset_size(model), block_size)
   end
   if i2rgb then
      x = image.rgb2y(x)
   end
   return x
end
-- Top-level upscaling entry point; handles grayscale input the same way as
-- reconstruct.image.
function reconstruct.scale(model, scale, x, block_size, upsampling_filter)
   local i2rgb = false
   if x:size(1) == 1 then
      local new_x = torch.Tensor(3, x:size(2), x:size(3))
      new_x[1]:copy(x)
      new_x[2]:copy(x)
      new_x[3]:copy(x)
      x = new_x
      i2rgb = true
   end
   if reconstruct.is_rgb(model) then
      x = reconstruct.scale_rgb(model, scale, x,
                                reconstruct.offset_size(model),
                                block_size,
                                upsampling_filter)
   else
      x = reconstruct.scale_y(model, scale, x,
                              reconstruct.offset_size(model),
                              block_size,
                              upsampling_filter)
   end
   if i2rgb then
      x = image.rgb2y(x)
   end
   return x
end
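-- Usage sketch (not part of the module); `w2nn.load_model`, the model path and
-- the file names are assumptions for illustration:
--[[
local w2nn = require 'w2nn'
local model = w2nn.load_model("models/noise1_model.t7") -- hypothetical path
local img = image.load("input.png", 3, "float")
local denoised = reconstruct.image(model, img, 128)          -- same-size reconstruction
local upscaled = reconstruct.scale(model, 2.0, img, 128)     -- 2x upscaling
image.save("output.png", upscaled)
--]]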
local function tr_f(a)
   return a:transpose(2, 3):contiguous()
end
local function itr_f(a)
   return a:transpose(2, 3):contiguous()
end
-- The 8 flip/transpose combinations used for test-time augmentation;
-- each `backward` undoes the corresponding `forward` transform.
local augmented_patterns = {
   {
      forward = function (a) return a end,
      backward = function (a) return a end
   },
   {
      forward = function (a) return image.hflip(a) end,
      backward = function (a) return image.hflip(a) end
   },
   {
      forward = function (a) return image.vflip(a) end,
      backward = function (a) return image.vflip(a) end
   },
   {
      forward = function (a) return image.hflip(image.vflip(a)) end,
      backward = function (a) return image.vflip(image.hflip(a)) end
   },
   {
      forward = function (a) return tr_f(a) end,
      backward = function (a) return itr_f(a) end
   },
   {
      forward = function (a) return image.hflip(tr_f(a)) end,
      backward = function (a) return itr_f(image.hflip(a)) end
   },
   {
      forward = function (a) return image.vflip(tr_f(a)) end,
      backward = function (a) return itr_f(image.vflip(a)) end
   },
   {
      forward = function (a) return image.hflip(image.vflip(tr_f(a))) end,
      backward = function (a) return itr_f(image.vflip(image.hflip(a))) end
   }
}
local function get_augmented_patterns(n)
   if n == 2 then
      return {augmented_patterns[1], augmented_patterns[5]}
   elseif n == 4 then
      return {augmented_patterns[1], augmented_patterns[5],
              augmented_patterns[2], augmented_patterns[7]}
   elseif n == 8 then
      return augmented_patterns
   else
      error("unsupported TTA level: " .. n)
   end
end
-- Run f on n augmented copies of x, undo each augmentation and average the results.
local function tta(f, n, model, x, block_size)
   local average = nil
   local offset = reconstruct.offset_size(model)
   local augments = get_augmented_patterns(n)
   for i = 1, #augments do
      local out = augments[i].backward(f(model, augments[i].forward(x), offset, block_size))
      if not average then
         average = out
      else
         average:add(out)
      end
   end
   return average:div(#augments)
end
function reconstruct.image_tta(model, n, x, block_size)
   if reconstruct.is_rgb(model) then
      return tta(reconstruct.image_rgb, n, model, x, block_size)
   else
      return tta(reconstruct.image_y, n, model, x, block_size)
   end
end
function reconstruct.scale_tta(model, n, scale, x, block_size, upsampling_filter)
   if reconstruct.is_rgb(model) then
      local f = function (model, x, offset, block_size)
         return reconstruct.scale_rgb(model, scale, x, offset, block_size, upsampling_filter)
      end
      return tta(f, n, model, x, block_size)
   else
      local f = function (model, x, offset, block_size)
         return reconstruct.scale_y(model, scale, x, offset, block_size, upsampling_filter)
      end
      return tta(f, n, model, x, block_size)
   end
end
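-- TTA usage sketch (not part of the module); `model` and `img` are the
-- hypothetical values from the earlier usage sketch:
--[[
-- average over 4 flip/transpose variants; roughly 4x slower, slightly more accurate
local out_denoise = reconstruct.image_tta(model, 4, img, 128)
local out_scale   = reconstruct.scale_tta(model, 4, 2.0, img, 128)
--]]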
return reconstruct