train.lua

require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'w2nn'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local compression = require 'compression'
local pairwise_transform = require 'pairwise_transform'
local image_loader = require 'image_loader'
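-- Helpers that run the current model on a fixed test image and save the result;
-- used as a visual check whenever a new best model is saved.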
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb,
                                settings.scale * settings.crop_size,
                                settings.upsampling_filter)
   image.save(file, up)
end
local function save_test_jpeg(model, rgb, file)
   local im, count = reconstruct.image(model, rgb)
   image.save(file, im)
end
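-- Randomly split the image list into a training set and a validation set of test_size images.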
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
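-- Build a fixed set of (input, target) validation patches by running the
-- pairwise transformer over the validation images, then shuffle them.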
local function make_validation_set(x, transformer, n, patches)
   n = n or 4
   local data = {}
   for i = 1, #x do
      for k = 1, math.max(n / patches, 1) do
         local xy = transformer(x[i], true, patches)
         for j = 1, #xy do
            table.insert(data, {x = xy[j][1], y = xy[j][2]})
         end
      end
      xlua.progress(i, #x)
      collectgarbage()
   end
   local new_data = {}
   local perm = torch.randperm(#data)
   for i = 1, perm:size(1) do
      new_data[i] = data[perm[i]]
   end
   data = new_data
   return data
end
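-- Evaluate the model on the validation patches in mini-batches and return the mean loss.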
local function validate(model, criterion, data, batch_size)
   local loss = 0
   local loss_count = 0
   local inputs_tmp = torch.Tensor(batch_size,
                                   data[1].x:size(1),
                                   data[1].x:size(2),
                                   data[1].x:size(3)):zero()
   local targets_tmp = torch.Tensor(batch_size,
                                    data[1].y:size(1),
                                    data[1].y:size(2),
                                    data[1].y:size(3)):zero()
   local inputs = inputs_tmp:clone():cuda()
   local targets = targets_tmp:clone():cuda()
   for t = 1, #data, batch_size do
      if t + batch_size - 1 > #data then
         break
      end
      for i = 1, batch_size do
         inputs_tmp[i]:copy(data[t + i - 1].x)
         targets_tmp[i]:copy(data[t + i - 1].y)
      end
      inputs:copy(inputs_tmp)
      targets:copy(targets_tmp)
      local z = model:forward(inputs)
      loss = loss + criterion:forward(z, targets)
      loss_count = loss_count + 1
      if loss_count % 10 == 0 then
         xlua.progress(t, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return loss / loss_count
end
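-- Build the training criterion: a clipped weighted Huber loss, weighted by the
-- BT.601 luma coefficients for RGB models and uniformly for single-channel (Y) models.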
local function create_criterion(model)
   if reconstruct.is_rgb(model) then
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(3, output_w * output_w)
      weight[1]:fill(0.29891 * 3) -- R
      weight[2]:fill(0.58661 * 3) -- G
      weight[3]:fill(0.11448 * 3) -- B
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   else
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(1, output_w * output_w)
      weight[1]:fill(1.0)
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   end
end
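-- Generate (input, target) patch pairs from a compressed image, applying the data
-- augmentation configured in settings for either the "scale" or the "noise" method.
-- Validation pairs disable the color-noise and overlay augmentations.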
local function transformer(model, x, is_validation, n, offset)
   x = compression.decompress(x)
   n = n or settings.patches
   if is_validation == nil then is_validation = false end
   local random_color_noise_rate = nil
   local random_overlay_rate = nil
   local active_cropping_rate = nil
   local active_cropping_tries = nil
   if is_validation then
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      random_color_noise_rate = 0.0
      random_overlay_rate = 0.0
   else
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      random_color_noise_rate = settings.random_color_noise_rate
      random_overlay_rate = settings.random_overlay_rate
   end
   if settings.method == "scale" then
      return pairwise_transform.scale(x,
                                      settings.scale,
                                      settings.crop_size, offset,
                                      n,
                                      {
                                         downsampling_filters = settings.downsampling_filters,
                                         upsampling_filter = settings.upsampling_filter,
                                         random_half_rate = settings.random_half_rate,
                                         random_color_noise_rate = random_color_noise_rate,
                                         random_overlay_rate = random_overlay_rate,
                                         random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                                         max_size = settings.max_size,
                                         active_cropping_rate = active_cropping_rate,
                                         active_cropping_tries = active_cropping_tries,
                                         rgb = (settings.color == "rgb"),
                                         gamma_correction = settings.gamma_correction,
                                         x_upsampling = not reconstruct.has_resize(model)
                                      })
   elseif settings.method == "noise" then
      return pairwise_transform.jpeg(x,
                                     settings.style,
                                     settings.noise_level,
                                     settings.crop_size, offset,
                                     n,
                                     {
                                        random_half_rate = settings.random_half_rate,
                                        random_color_noise_rate = random_color_noise_rate,
                                        random_overlay_rate = random_overlay_rate,
                                        random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                                        max_size = settings.max_size,
                                        jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
                                        active_cropping_rate = active_cropping_rate,
                                        active_cropping_tries = active_cropping_tries,
                                        nr_rate = settings.nr_rate,
                                        rgb = (settings.color == "rgb")
                                     })
   end
end
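-- Regenerate the training patch tensors x and y in-place from the training images;
-- called at the start of every epoch.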
local function resampling(x, y, train_x, transformer, input_size, target_size)
   print("## resampling")
   for t = 1, #train_x do
      xlua.progress(t, #train_x)
      local xy = transformer(train_x[t], false, settings.patches)
      for i = 1, #xy do
         local index = (t - 1) * settings.patches + i
         x[index]:copy(xy[i][1])
         y[index]:copy(xy[i][2])
      end
      if t % 50 == 0 then
         collectgarbage()
      end
   end
end
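-- Drop images that are too small to yield crop_size patches after scaling.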
local function remove_small_image(x)
   local new_x = {}
   for i = 1, #x do
      local x_s = compression.size(x[i])
      if x_s[2] / settings.scale > settings.crop_size + 16 and
         x_s[3] / settings.scale > settings.crop_size + 16 then
         table.insert(new_x, x[i])
      end
      if i % 100 == 0 then
         collectgarbage()
      end
   end
   print(string.format("removed %d small images", #x - #new_x))
   return new_x
end
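-- Plot the training and validation loss history with gnuplot.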
local function plot(train, valid)
   gnuplot.plot({
      {'training', torch.Tensor(train), '-'},
      {'validation', torch.Tensor(valid), '-'}})
end
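-- Main training loop: build the model, criterion and datasets, then alternate
-- between resampling patches, minibatch Adam updates and validation, saving the
-- model and a test image whenever the validation score improves, and decaying
-- the learning rate after repeated non-improvements.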
local function train()
   local hist_train = {}
   local hist_valid = {}
   local model = srcnn.create(settings.model, settings.backend, settings.color)
   local offset = reconstruct.offset_size(model)
   local pairwise_func = function(x, is_validation, n)
      return transformer(model, x, is_validation, n, offset)
   end
   local criterion = create_criterion(model)
   local eval_metric = nn.MSECriterion():cuda()
   local x = remove_small_image(torch.load(settings.images))
   local train_x, valid_x = split_data(x, math.max(math.floor(settings.validation_rate * #x), 1))
   local adam_config = {
      learningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
   }
   local lrd_count = 0
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 1000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x, pairwise_func,
                                        settings.validation_crops,
                                        settings.patches)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   local x = nil
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   if reconstruct.has_resize(model) then
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size / settings.scale, settings.crop_size / settings.scale)
   else
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size, settings.crop_size)
   end
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      resampling(x, y, train_x, pairwise_func)
      for i = 1, settings.inner_epoch do
         local train_score = minibatch_adam(model, criterion, eval_metric, x, y, adam_config)
         print(train_score)
         model:evaluate()
         print("# validation")
         local score = validate(model, eval_metric, valid_xy, adam_config.xBatchSize)
         table.insert(hist_train, train_score.MSE)
         table.insert(hist_valid, score)
         if settings.plot then
            plot(hist_train, hist_valid)
         end
         if score < best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            lrd_count = 0
            best_score = score
            print("* update best model")
            if settings.save_history then
               torch.save(string.format(settings.model_file, epoch, i), model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level,
                                                                          epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale,
                                                                            epoch, i))
                  save_test_scale(model, test_image, log)
               end
            else
               torch.save(settings.model_file, model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               end
            end
         else
            lrd_count = lrd_count + 1
            if lrd_count > 2 then
               adam_config.learningRate = adam_config.learningRate * 0.8
               print("* learning rate decay: " .. adam_config.learningRate)
               lrd_count = 0
            end
         end
         print("PSNR: " .. 10 * math.log10(1 / score) .. ", MSE: " .. score .. ", Best MSE: " .. best_score)
         collectgarbage()
      end
   end
end
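-- Entry point: select the GPU, seed the RNGs and start training.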
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()