train.lua

require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'image'
require 'w2nn'
local threads = require 'threads'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local image_loader = require 'image_loader'
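-- Helpers that apply the current model to the test image and save a snapshot:
-- save_test_scale saves an upscaled result, save_test_jpeg saves a reconstructed (denoised) result,
-- and save_test_user chooses between them based on settings.scale.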
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb)
   image.save(file, up)
end
local function save_test_jpeg(model, rgb, file)
   local im, count = reconstruct.image(model, rgb)
   image.save(file, im)
end
local function save_test_user(model, rgb, file)
   if settings.scale == 1 then
      save_test_jpeg(model, rgb, file)
   else
      save_test_scale(model, rgb, file)
   end
end
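-- Randomly split the dataset into a training set and a validation set of test_size entries.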
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
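-- Thread pool that generates (input, target) training pairs in background worker threads.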
local g_transform_pool = nil
local function transform_pool_init(has_resize, offset)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   g_transform_pool = threads.Threads(
      nthread,
      function(threadid)
         require 'pl'
         local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
         package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
         require 'nn'
         require 'cunn'
         local compression = require 'compression'
         local pairwise_transform = require 'pairwise_transform'
         -- `transformer` is defined as a global so the addjob closures can call it inside each worker thread
         function transformer(x, is_validation, n)
            local meta = {data = {}}
            local y = nil
            if type(x) == "table" and type(x[2]) == "table" then
               meta = x[2]
               if x[1].x and x[1].y then
                  y = compression.decompress(x[1].y)
                  x = compression.decompress(x[1].x)
               else
                  x = compression.decompress(x[1])
               end
            else
               x = compression.decompress(x)
            end
            n = n or settings.patches
            if is_validation == nil then is_validation = false end
            local random_color_noise_rate = nil
            local random_overlay_rate = nil
            local active_cropping_rate = nil
            local active_cropping_tries = nil
            if is_validation then
               active_cropping_rate = settings.active_cropping_rate
               active_cropping_tries = settings.active_cropping_tries
               random_color_noise_rate = 0.0
               random_overlay_rate = 0.0
            else
               active_cropping_rate = settings.active_cropping_rate
               active_cropping_tries = settings.active_cropping_tries
               random_color_noise_rate = settings.random_color_noise_rate
               random_overlay_rate = settings.random_overlay_rate
            end
            if settings.method == "scale" then
               local conf = tablex.update({
                  downsampling_filters = settings.downsampling_filters,
                  random_half_rate = settings.random_half_rate,
                  random_color_noise_rate = random_color_noise_rate,
                  random_overlay_rate = random_overlay_rate,
                  random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                  max_size = settings.max_size,
                  active_cropping_rate = active_cropping_rate,
                  active_cropping_tries = active_cropping_tries,
                  rgb = (settings.color == "rgb"),
                  x_upsampling = not has_resize,
                  resize_blur_min = settings.resize_blur_min,
                  resize_blur_max = settings.resize_blur_max}, meta)
               return pairwise_transform.scale(x, settings.scale,
                                               settings.crop_size, offset, n, conf)
            elseif settings.method == "noise" then
               local conf = tablex.update({
                  random_half_rate = settings.random_half_rate,
                  random_color_noise_rate = random_color_noise_rate,
                  random_overlay_rate = random_overlay_rate,
                  random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                  max_size = settings.max_size,
                  jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
                  active_cropping_rate = active_cropping_rate,
                  active_cropping_tries = active_cropping_tries,
                  nr_rate = settings.nr_rate,
                  rgb = (settings.color == "rgb")}, meta)
               return pairwise_transform.jpeg(x, settings.style, settings.noise_level,
                                              settings.crop_size, offset, n, conf)
            elseif settings.method == "noise_scale" then
               local conf = tablex.update({
                  downsampling_filters = settings.downsampling_filters,
                  random_half_rate = settings.random_half_rate,
                  random_color_noise_rate = random_color_noise_rate,
                  random_overlay_rate = random_overlay_rate,
                  random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                  max_size = settings.max_size,
                  jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
                  nr_rate = settings.nr_rate,
                  active_cropping_rate = active_cropping_rate,
                  active_cropping_tries = active_cropping_tries,
                  rgb = (settings.color == "rgb"),
                  x_upsampling = not has_resize,
                  resize_blur_min = settings.resize_blur_min,
                  resize_blur_max = settings.resize_blur_max}, meta)
               return pairwise_transform.jpeg_scale(x, settings.scale, settings.style,
                                                    settings.noise_level,
                                                    settings.crop_size, offset, n, conf)
            elseif settings.method == "user" then
               local conf = tablex.update({
                  max_size = settings.max_size,
                  active_cropping_rate = active_cropping_rate,
                  active_cropping_tries = active_cropping_tries,
                  rgb = (settings.color == "rgb")}, meta)
               return pairwise_transform.user(x, y,
                                              settings.crop_size, offset, n, conf)
            end
         end
      end
   )
   g_transform_pool:synchronize()
end
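-- Build a fixed set of validation patch pairs using the transform pool.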
local function make_validation_set(x, n, patches)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   n = n or 4
   local validation_patches = math.min(16, patches or 16)
   local data = {}
   g_transform_pool:synchronize()
   torch.setnumthreads(1) -- keep the main process on a single thread while the pool workers run
   for i = 1, #x do
      for k = 1, math.max(n / validation_patches, 1) do
         local input = x[i]
         g_transform_pool:addjob(
            function()
               local xy = transformer(input, true, validation_patches)
               collectgarbage()
               return xy
            end,
            function(xy)
               for j = 1, #xy do
                  table.insert(data, {x = xy[j][1], y = xy[j][2]})
               end
            end
         )
      end
      g_transform_pool:synchronize()
      xlua.progress(i, #x)
   end
   g_transform_pool:synchronize()
   torch.setnumthreads(nthread) -- revert to the original thread count
   local new_data = {}
   local perm = torch.randperm(#data)
   for i = 1, perm:size(1) do
      new_data[i] = data[perm[i]]
   end
   data = new_data
   return data
end
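-- Evaluate the model on the validation patches; returns average loss, MSE and PSNR (pixel values are in [0, 1]).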
local function validate(model, criterion, eval_metric, data, batch_size)
   local loss = 0
   local mse = 0
   local loss_count = 0
   local inputs_tmp = torch.Tensor(batch_size,
                                   data[1].x:size(1),
                                   data[1].x:size(2),
                                   data[1].x:size(3)):zero()
   local targets_tmp = torch.Tensor(batch_size,
                                    data[1].y:size(1),
                                    data[1].y:size(2),
                                    data[1].y:size(3)):zero()
   local inputs = inputs_tmp:clone():cuda()
   local targets = targets_tmp:clone():cuda()
   for t = 1, #data, batch_size do
      if t + batch_size - 1 > #data then
         break
      end
      for i = 1, batch_size do
         inputs_tmp[i]:copy(data[t + i - 1].x)
         targets_tmp[i]:copy(data[t + i - 1].y)
      end
      inputs:copy(inputs_tmp)
      targets:copy(targets_tmp)
      local z = model:forward(inputs)
      loss = loss + criterion:forward(z, targets)
      mse = mse + eval_metric:forward(z, targets)
      loss_count = loss_count + 1
      if loss_count % 10 == 0 then
         xlua.progress(t, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return {loss = loss / loss_count, MSE = mse / loss_count, PSNR = 10 * math.log10(1 / (mse / loss_count))}
end
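-- Build the training criterion: a clipped weighted Huber loss, with RGB channels weighted by luma coefficients.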
local function create_criterion(model)
   if reconstruct.is_rgb(model) then
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(3, output_w * output_w)
      weight[1]:fill(0.29891 * 3) -- R
      weight[2]:fill(0.58661 * 3) -- G
      weight[3]:fill(0.11448 * 3) -- B
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   else
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(1, output_w * output_w)
      weight[1]:fill(1.0)
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   end
end
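-- Refill the patch tensors x and y with freshly transformed samples drawn from train_x.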
local function resampling(x, y, train_x)
   local c = 1
   local shuffle = torch.randperm(#train_x)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   torch.setnumthreads(1) -- keep the main process on a single thread while the pool workers run
   for t = 1, #train_x do
      local input = train_x[shuffle[t]]
      g_transform_pool:addjob(
         function()
            local xy = transformer(input, false, settings.patches)
            return xy
         end,
         function(xy)
            for i = 1, #xy do
               if c <= x:size(1) then
                  x[c]:copy(xy[i][1])
                  y[c]:copy(xy[i][2])
                  c = c + 1
               else
                  break
               end
            end
         end
      )
      if t % 50 == 0 then
         xlua.progress(t, #train_x)
         g_transform_pool:synchronize()
         collectgarbage()
      end
      if c > x:size(1) then
         break
      end
   end
   g_transform_pool:synchronize()
   xlua.progress(#train_x, #train_x)
   torch.setnumthreads(nthread) -- revert to the original thread count
end
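-- Select the k samples with the highest per-instance loss and return a random subset of them ("oracle" data).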
local function get_oracle_data(x, y, instance_loss, k, samples)
   local index = torch.LongTensor(instance_loss:size(1))
   local dummy = torch.Tensor(instance_loss:size(1))
   torch.topk(dummy, index, instance_loss, k, 1, true)
   print("MSE of all data: " .. instance_loss:mean() .. ", MSE of oracle data: " .. dummy:mean())
   local shuffle = torch.randperm(k)
   local x_s = x:size()
   local y_s = y:size()
   x_s[1] = samples
   y_s[1] = samples
   local oracle_x = torch.Tensor(table.unpack(torch.totable(x_s)))
   local oracle_y = torch.Tensor(table.unpack(torch.totable(y_s)))
   for i = 1, samples do
      oracle_x[i]:copy(x[index[shuffle[i]]])
      oracle_y[i]:copy(y[index[shuffle[i]]])
   end
   return oracle_x, oracle_y
end
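-- Drop images that are too small to provide crop_size patches (plus margin) at the training scale.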
local function remove_small_image(x)
   local compression = require 'compression'
   local new_x = {}
   for i = 1, #x do
      local xe, meta, x_s
      xe = x[i]
      if type(xe) == "table" and type(xe[2]) == "table" then
         if xe[1].x and xe[1].y then
            x_s = compression.size(xe[1].y) -- y size
         else
            x_s = compression.size(xe[1])
         end
      else
         x_s = compression.size(xe)
      end
      if x_s[2] / settings.scale > settings.crop_size + 32 and
         x_s[3] / settings.scale > settings.crop_size + 32 then
         table.insert(new_x, x[i])
      end
      if i % 100 == 0 then
         collectgarbage()
      end
   end
   print(string.format("%d small images are removed", #x - #new_x))
   return new_x
end
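-- Plot the training/validation loss history with gnuplot.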
local function plot(train, valid)
   gnuplot.plot({
      {'training', torch.Tensor(train), '-'},
      {'validation', torch.Tensor(valid), '-'}})
end
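-- Main training loop: resample patches each epoch, run minibatch Adam, validate, and save the best model.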
local function train()
   local hist_train = {}
   local hist_valid = {}
   local model
   if settings.resume:len() > 0 then
      model = torch.load(settings.resume, "ascii")
   else
      model = srcnn.create(settings.model, settings.backend, settings.color)
   end
   dir.makepath(settings.model_dir)
   local offset = reconstruct.offset_size(model)
   transform_pool_init(reconstruct.has_resize(model), offset)
   local criterion = create_criterion(model)
   local eval_metric = w2nn.ClippedMSECriterion(0, 1):cuda()
   local x = remove_small_image(torch.load(settings.images))
   local train_x, valid_x = split_data(x, math.max(math.floor(settings.validation_rate * #x), 1))
   local adam_config = {
      xLearningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
      xLearningRateDecay = settings.learning_rate_decay,
      xInstanceLoss = (settings.oracle_rate > 0)
   }
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 1000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x,
                                        settings.validation_crops,
                                        settings.patches)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   local x = nil
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   if reconstruct.has_resize(model) then
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size / settings.scale, settings.crop_size / settings.scale)
   else
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size, settings.crop_size)
   end
   local instance_loss = nil
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      if adam_config.learningRate then
         print("learning rate: " .. adam_config.learningRate)
      end
      print("## resampling")
      if instance_loss then
         -- active learning: keep the highest-loss patches from the previous epoch ("oracle" data)
         -- and refill the rest of the buffer with freshly sampled patches
         local oracle_k = math.min(x:size(1) * (settings.oracle_rate * (1 / (1 - settings.oracle_drop_rate))), x:size(1))
         local oracle_n = math.min(x:size(1) * settings.oracle_rate, x:size(1))
         if oracle_n > 0 then
            local oracle_x, oracle_y = get_oracle_data(x, y, instance_loss, oracle_k, oracle_n)
            resampling(x:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)),
                       y:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)), train_x)
            x:narrow(1, 1, oracle_x:size(1)):copy(oracle_x)
            y:narrow(1, 1, oracle_y:size(1)):copy(oracle_y)
            local draw_n = math.floor(math.sqrt(oracle_x:size(1)))
            if draw_n > 100 then
               draw_n = 100
            end
            image.save(path.join(settings.model_dir, "oracle_x.png"),
                       image.toDisplayTensor({
                          input = oracle_x:narrow(1, 1, draw_n * draw_n),
                          padding = 2,
                          nrow = draw_n,
                          min = 0,
                          max = 1}))
         else
            resampling(x, y, train_x)
         end
      else
         resampling(x, y, train_x)
      end
      collectgarbage()
      instance_loss = torch.Tensor(x:size(1)):zero()
      for i = 1, settings.inner_epoch do
         model:training()
         local train_score, il = minibatch_adam(model, criterion, eval_metric, x, y, adam_config)
         instance_loss:copy(il)
         print(train_score)
         model:evaluate()
         print("# validation")
         local score = validate(model, criterion, eval_metric, valid_xy, adam_config.xBatchSize)
         table.insert(hist_train, train_score.loss)
         table.insert(hist_valid, score.loss)
         if settings.plot then
            plot(hist_train, hist_valid)
         end
         if score.MSE < best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            best_score = score.MSE
            print("* model has been updated")
            if settings.save_history then
               torch.save(settings.model_file_best, model:clearState(), "ascii")
               torch.save(string.format(settings.model_file, epoch, i), model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level, epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale, epoch, i))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.%d-%d.png"):format(settings.noise_level,
                                                                                    settings.scale, epoch, i))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "user" then
                  local log = path.join(settings.model_dir,
                                        ("%s_best.%d-%d.png"):format(settings.name, epoch, i))
                  save_test_user(model, test_image, log)
               end
            else
               torch.save(settings.model_file, model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.png"):format(settings.noise_level, settings.scale))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "user" then
                  local log = path.join(settings.model_dir,
                                        ("%s_best.png"):format(settings.name))
                  save_test_user(model, test_image, log)
               end
            end
         end
         print("Batch-wise PSNR: " .. score.PSNR .. ", loss: " .. score.loss .. ", MSE: " .. score.MSE .. ", Minimum MSE: " .. best_score)
         collectgarbage()
      end
   end
end
-- Entry point: select the GPU, seed the RNGs, then start training.
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()