train.lua

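-- Training script: builds an SRCNN-style model for the "scale" (upscaling) or
-- "noise" (JPEG artifact reduction) method, trains it with minibatch Adam on
-- patch pairs produced by pairwise_transform, and keeps the snapshot with the
-- best validation PSNR. All options are read from the `settings` module.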
require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'w2nn'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local compression = require 'compression'
local pairwise_transform = require 'pairwise_transform'
local image_loader = require 'image_loader'
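
-- Preview helpers for the current best model: save_test_scale upscales the
-- test image by settings.scale, save_test_jpeg runs plain reconstruction
-- (used for the noise-reduction models).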
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb, 128, settings.upsampling_filter)
   image.save(file, up)
end
local function save_test_jpeg(model, rgb, file)
   local im, count = reconstruct.image(model, rgb)
   image.save(file, im)
end
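
-- Randomly shuffle the dataset and split it into a training set and a
-- validation set of `test_size` samples.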
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
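
-- Build a fixed validation set: each image is expanded into roughly `n`
-- (input, target) patch pairs (generated `patches` at a time), so validation
-- sees the same pairs every epoch.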
local function make_validation_set(x, transformer, n, patches)
   n = n or 4
   local data = {}
   for i = 1, #x do
      for k = 1, math.max(n / patches, 1) do
         local xy = transformer(x[i], true, patches)
         for j = 1, #xy do
            table.insert(data, {x = xy[j][1], y = xy[j][2]})
         end
      end
      xlua.progress(i, #x)
      collectgarbage()
   end
   return data
end
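
-- Evaluate `model` on the validation pairs with the given criterion
-- (the PSNR metric in train()), averaging over full batches; a trailing
-- partial batch is skipped.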
local function validate(model, criterion, data, batch_size)
   local loss = 0
   local loss_count = 0
   local inputs_tmp = torch.Tensor(batch_size,
                                   data[1].x:size(1),
                                   data[1].x:size(2),
                                   data[1].x:size(3)):zero()
   local targets_tmp = torch.Tensor(batch_size,
                                    data[1].y:size(1),
                                    data[1].y:size(2),
                                    data[1].y:size(3)):zero()
   local inputs = inputs_tmp:clone():cuda()
   local targets = targets_tmp:clone():cuda()
   for t = 1, #data, batch_size do
      if t + batch_size - 1 > #data then
         break
      end
      for i = 1, batch_size do
         inputs_tmp[i]:copy(data[t + i - 1].x)
         targets_tmp[i]:copy(data[t + i - 1].y)
      end
      inputs:copy(inputs_tmp)
      targets:copy(targets_tmp)
      local z = model:forward(inputs)
      loss = loss + criterion:forward(z, targets)
      loss_count = loss_count + 1
      if loss_count % 10 == 0 then
         xlua.progress(t, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return loss / loss_count
end
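
-- Training loss: a clipped weighted Huber criterion over the output patch.
-- RGB models weight the channels by luma coefficients (0.29891/0.58661/0.11448,
-- scaled by 3); Y-channel models use a uniform weight.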
local function create_criterion(model)
   if reconstruct.is_rgb(model) then
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(3, output_w * output_w)
      weight[1]:fill(0.29891 * 3) -- R
      weight[2]:fill(0.58661 * 3) -- G
      weight[3]:fill(0.11448 * 3) -- B
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   else
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(1, output_w * output_w)
      weight[1]:fill(1.0)
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   end
end
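
-- Produce (input, target) patch pairs for one compressed sample.
-- Validation disables the color-noise and overlay augmentations; the "scale"
-- method generates downscale/upscale pairs, the "noise" method generates
-- JPEG-degraded pairs.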
local function transformer(x, is_validation, n, offset)
   x = compression.decompress(x)
   n = n or settings.patches
   if is_validation == nil then is_validation = false end
   local random_color_noise_rate = nil
   local random_overlay_rate = nil
   local active_cropping_rate = nil
   local active_cropping_tries = nil
   if is_validation then
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      random_color_noise_rate = 0.0
      random_overlay_rate = 0.0
   else
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      random_color_noise_rate = settings.random_color_noise_rate
      random_overlay_rate = settings.random_overlay_rate
   end
   if settings.method == "scale" then
      return pairwise_transform.scale(
         x, settings.scale,
         settings.crop_size, offset, n,
         {
            downsampling_filters = settings.downsampling_filters,
            upsampling_filter = settings.upsampling_filter,
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            rgb = (settings.color == "rgb"),
            gamma_correction = settings.gamma_correction
         })
   elseif settings.method == "noise" then
      return pairwise_transform.jpeg(
         x, settings.style, settings.noise_level,
         settings.crop_size, offset, n,
         {
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            nr_rate = settings.nr_rate,
            rgb = (settings.color == "rgb")
         })
   end
end
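
-- Regenerate the training patches in place at the start of every epoch,
-- filling the preallocated tensors x (inputs) and y (targets).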
local function resampling(x, y, train_x, transformer, input_size, target_size)
   print("## resampling")
   for t = 1, #train_x do
      xlua.progress(t, #train_x)
      local xy = transformer(train_x[t], false, settings.patches)
      for i = 1, #xy do
         local index = (t - 1) * settings.patches + i
         x[index]:copy(xy[i][1])
         y[index]:copy(xy[i][2])
      end
      if t % 50 == 0 then
         collectgarbage()
      end
   end
end
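
-- Plot the training/validation history with gnuplot when settings.plot is set.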
local function plot(train, valid)
   gnuplot.plot({
      {'training', torch.Tensor(train), '-'},
      {'validation', torch.Tensor(valid), '-'}})
end
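
-- Main training loop: resample patches each epoch, run `inner_epoch` passes of
-- minibatch Adam, validate by PSNR, snapshot the best model (one file per
-- improvement when save_history is set), and decay the learning rate by 0.8
-- after three consecutive evaluations without improvement, down to LR_MIN.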
local function train()
   local hist_train = {}
   local hist_valid = {}
   local LR_MIN = 1.0e-5
   local model = srcnn.create(settings.method, settings.backend, settings.color)
   local offset = reconstruct.offset_size(model)
   local pairwise_func = function(x, is_validation, n)
      return transformer(x, is_validation, n, offset)
   end
   local criterion = create_criterion(model)
   local eval_metric = w2nn.PSNRCriterion():cuda()
   local x = torch.load(settings.images)
   local train_x, valid_x = split_data(x, math.floor(settings.validation_rate * #x))
   local adam_config = {
      learningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
   }
   local lrd_count = 0
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 0.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x, pairwise_func,
                                        settings.validation_crops,
                                        settings.patches)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   local x = torch.Tensor(settings.patches * #train_x,
                          ch, settings.crop_size, settings.crop_size)
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      resampling(x, y, train_x, pairwise_func)
      for i = 1, settings.inner_epoch do
         local train_score = minibatch_adam(model, criterion, eval_metric, x, y, adam_config)
         print(train_score)
         model:evaluate()
         print("# validation")
         local score = validate(model, eval_metric, valid_xy, adam_config.xBatchSize)
         table.insert(hist_train, train_score.PSNR)
         table.insert(hist_valid, score)
         if settings.plot then
            plot(hist_train, hist_valid)
         end
         if score > best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            lrd_count = 0
            best_score = score
            print("* update best model")
            if settings.save_history then
               torch.save(string.format(settings.model_file, epoch, i), model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level,
                                                                          epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale,
                                                                            epoch, i))
                  save_test_scale(model, test_image, log)
               end
            else
               torch.save(settings.model_file, model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               end
            end
         else
            lrd_count = lrd_count + 1
            if lrd_count > 2 and adam_config.learningRate > LR_MIN then
               adam_config.learningRate = adam_config.learningRate * 0.8
               print("* learning rate decay: " .. adam_config.learningRate)
               lrd_count = 0
            end
         end
         print("current: " .. score .. ", best: " .. best_score)
         collectgarbage()
      end
   end
end
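
-- Entry point: select the GPU, seed torch/cutorch for reproducibility, print
-- the active settings, and start training.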
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()
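
-- Note: the fields read from `settings` (method, noise_level, model_dir, test,
-- etc.) are presumably defined and parsed in lib/settings.lua, which is added
-- to package.path above but not shown here.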