-- train.lua

require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'image'
require 'w2nn'
local threads = require 'threads'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local image_loader = require 'image_loader'
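-- Helpers that run the current model on the test image and save the result,
-- so training progress can be inspected visually.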
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb)
   image.save(file, up)
end
local function save_test_jpeg(model, rgb, file)
   local im, count = reconstruct.image(model, rgb)
   image.save(file, im)
end
local function save_test_user(model, rgb, file)
   if settings.scale == 1 then
      save_test_jpeg(model, rgb, file)
   else
      save_test_scale(model, rgb, file)
   end
end
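-- Randomly split the dataset into a training set and a validation set of test_size items.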
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
local g_transform_pool = nil
local g_mutex = nil
local g_mutex_id = nil
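-- Create the worker thread pool that generates training pairs.
-- Each worker defines a global transformer(x, is_validation, n) that decompresses
-- a sample and produces n cropped (input, target) patch pairs via pairwise_transform,
-- according to settings.method ("scale", "noise", "noise_scale" or "user").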
local function transform_pool_init(has_resize, offset)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   g_mutex = threads.Mutex()
   g_mutex_id = g_mutex:id()
   g_transform_pool = threads.Threads(
      nthread,
      threads.safe(
         function(threadid)
            require 'pl'
            local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
            package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
            require 'torch'
            require 'nn'
            require 'cunn'
            torch.setnumthreads(1)
            torch.setdefaulttensortype("torch.FloatTensor")
            local threads = require 'threads'
            local compression = require 'compression'
            local pairwise_transform = require 'pairwise_transform'
            function transformer(x, is_validation, n)
               local mutex = threads.Mutex(g_mutex_id)
               local meta = {data = {}}
               local y = nil
               if type(x) == "table" and type(x[2]) == "table" then
                  meta = x[2]
                  if x[1].x and x[1].y then
                     y = compression.decompress(x[1].y)
                     x = compression.decompress(x[1].x)
                  else
                     x = compression.decompress(x[1])
                  end
               else
                  x = compression.decompress(x)
               end
               n = n or settings.patches
               if is_validation == nil then is_validation = false end
               local random_color_noise_rate = nil
               local random_overlay_rate = nil
               local active_cropping_rate = nil
               local active_cropping_tries = nil
               if is_validation then
                  active_cropping_rate = settings.active_cropping_rate
                  active_cropping_tries = settings.active_cropping_tries
                  random_color_noise_rate = 0.0
                  random_overlay_rate = 0.0
               else
                  active_cropping_rate = settings.active_cropping_rate
                  active_cropping_tries = settings.active_cropping_tries
                  random_color_noise_rate = settings.random_color_noise_rate
                  random_overlay_rate = settings.random_overlay_rate
               end
               if settings.method == "scale" then
                  local conf = tablex.update({
                     mutex = mutex,
                     downsampling_filters = settings.downsampling_filters,
                     random_half_rate = settings.random_half_rate,
                     random_color_noise_rate = random_color_noise_rate,
                     random_overlay_rate = random_overlay_rate,
                     random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                     random_blur_rate = settings.random_blur_rate,
                     random_blur_size = settings.random_blur_size,
                     random_blur_sigma_min = settings.random_blur_sigma_min,
                     random_blur_sigma_max = settings.random_blur_sigma_max,
                     max_size = settings.max_size,
                     active_cropping_rate = active_cropping_rate,
                     active_cropping_tries = active_cropping_tries,
                     rgb = (settings.color == "rgb"),
                     x_upsampling = not has_resize,
                     resize_blur_min = settings.resize_blur_min,
                     resize_blur_max = settings.resize_blur_max}, meta)
                  return pairwise_transform.scale(x,
                                                  settings.scale,
                                                  settings.crop_size, offset,
                                                  n, conf)
               elseif settings.method == "noise" then
                  local conf = tablex.update({
                     mutex = mutex,
                     random_half_rate = settings.random_half_rate,
                     random_color_noise_rate = random_color_noise_rate,
                     random_overlay_rate = random_overlay_rate,
                     random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                     random_blur_rate = settings.random_blur_rate,
                     random_blur_size = settings.random_blur_size,
                     random_blur_sigma_min = settings.random_blur_sigma_min,
                     random_blur_sigma_max = settings.random_blur_sigma_max,
                     max_size = settings.max_size,
                     jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
                     active_cropping_rate = active_cropping_rate,
                     active_cropping_tries = active_cropping_tries,
                     nr_rate = settings.nr_rate,
                     rgb = (settings.color == "rgb")}, meta)
                  return pairwise_transform.jpeg(x,
                                                 settings.style,
                                                 settings.noise_level,
                                                 settings.crop_size, offset,
                                                 n, conf)
               elseif settings.method == "noise_scale" then
                  local conf = tablex.update({
                     mutex = mutex,
                     downsampling_filters = settings.downsampling_filters,
                     random_half_rate = settings.random_half_rate,
                     random_color_noise_rate = random_color_noise_rate,
                     random_overlay_rate = random_overlay_rate,
                     random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
                     random_blur_rate = settings.random_blur_rate,
                     random_blur_size = settings.random_blur_size,
                     random_blur_sigma_min = settings.random_blur_sigma_min,
                     random_blur_sigma_max = settings.random_blur_sigma_max,
                     max_size = settings.max_size,
                     jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
                     nr_rate = settings.nr_rate,
                     active_cropping_rate = active_cropping_rate,
                     active_cropping_tries = active_cropping_tries,
                     rgb = (settings.color == "rgb"),
                     x_upsampling = not has_resize,
                     resize_blur_min = settings.resize_blur_min,
                     resize_blur_max = settings.resize_blur_max}, meta)
                  return pairwise_transform.jpeg_scale(x,
                                                       settings.scale,
                                                       settings.style,
                                                       settings.noise_level,
                                                       settings.crop_size, offset,
                                                       n, conf)
               elseif settings.method == "user" then
                  if is_validation == nil then is_validation = false end
                  local rotate_rate = nil
                  local scale_rate = nil
                  local negate_rate = nil
                  local negate_x_rate = nil
                  if is_validation then
                     rotate_rate = 0
                     scale_rate = 0
                     negate_rate = 0
                     negate_x_rate = 0
                  else
                     rotate_rate = settings.random_pairwise_rotate_rate
                     scale_rate = settings.random_pairwise_scale_rate
                     negate_rate = settings.random_pairwise_negate_rate
                     negate_x_rate = settings.random_pairwise_negate_x_rate
                  end
                  local conf = tablex.update({
                     gcn = settings.gcn,
                     max_size = settings.max_size,
                     active_cropping_rate = active_cropping_rate,
                     active_cropping_tries = active_cropping_tries,
                     random_pairwise_rotate_rate = rotate_rate,
                     random_pairwise_rotate_min = settings.random_pairwise_rotate_min,
                     random_pairwise_rotate_max = settings.random_pairwise_rotate_max,
                     random_pairwise_scale_rate = scale_rate,
                     random_pairwise_scale_min = settings.random_pairwise_scale_min,
                     random_pairwise_scale_max = settings.random_pairwise_scale_max,
                     random_pairwise_negate_rate = negate_rate,
                     random_pairwise_negate_x_rate = negate_x_rate,
                     pairwise_y_binary = settings.pairwise_y_binary,
                     pairwise_flip = settings.pairwise_flip,
                     rgb = (settings.color == "rgb")}, meta)
                  return pairwise_transform.user(x, y,
                                                 settings.crop_size, offset,
                                                 n, conf)
               end
            end
         end)
   )
   g_transform_pool:synchronize()
end
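-- Build a fixed set of validation patches by running transformer in validation mode
-- on the thread pool.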
local function make_validation_set(x, n, patches)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   n = n or 4
   local validation_patches = math.min(16, patches or 16)
   local data = {}
   g_transform_pool:synchronize()
   torch.setnumthreads(1) -- 1
   for i = 1, #x do
      for k = 1, math.max(n / validation_patches, 1) do
         local input = x[i]
         g_transform_pool:addjob(
            function()
               local xy = transformer(input, true, validation_patches)
               return xy
            end,
            function(xy)
               for j = 1, #xy do
                  table.insert(data, {x = xy[j][1], y = xy[j][2]})
               end
            end
         )
      end
      if i % 20 == 0 then
         collectgarbage()
         g_transform_pool:synchronize()
         xlua.progress(i, #x)
      end
   end
   g_transform_pool:synchronize()
   torch.setnumthreads(nthread) -- revert
   local new_data = {}
   local perm = torch.randperm(#data)
   for i = 1, perm:size(1) do
      new_data[i] = data[perm[i]]
   end
   data = new_data
   return data
end
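-- Evaluate the model on the validation patches; returns the mean loss, MSE and
-- batch-wise PSNR over full batches.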
local function validate(model, criterion, eval_metric, data, batch_size)
   local psnr = 0
   local loss = 0
   local mse = 0
   local loss_count = 0
   local inputs_tmp = torch.Tensor(batch_size,
                                   data[1].x:size(1),
                                   data[1].x:size(2),
                                   data[1].x:size(3)):zero()
   local targets_tmp = torch.Tensor(batch_size,
                                    data[1].y:size(1),
                                    data[1].y:size(2),
                                    data[1].y:size(3)):zero()
   local inputs = inputs_tmp:clone():cuda()
   local targets = targets_tmp:clone():cuda()
   for t = 1, #data, batch_size do
      if t + batch_size - 1 > #data then
         break
      end
      for i = 1, batch_size do
         inputs_tmp[i]:copy(data[t + i - 1].x)
         targets_tmp[i]:copy(data[t + i - 1].y)
      end
      inputs:copy(inputs_tmp)
      targets:copy(targets_tmp)
      local z = model:forward(inputs)
      local batch_mse = eval_metric:forward(z, targets)
      loss = loss + criterion:forward(z, targets)
      mse = mse + batch_mse
      psnr = psnr + (10 * math.log10(1 / (batch_mse + 1.0e-6)))
      loss_count = loss_count + 1
      if loss_count % 10 == 0 then
         xlua.progress(t, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return {loss = loss / loss_count, MSE = mse / loss_count, PSNR = psnr / loss_count}
end
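-- Select the training criterion from settings.loss:
-- "huber" uses a clipped, weighted Huber loss (BT.601 luma weights per channel for RGB),
-- "l1" and "mse" use the corresponding clipped criteria.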
local function create_criterion(model)
   if settings.loss == "huber" then
      if reconstruct.is_rgb(model) then
         local offset = reconstruct.offset_size(model)
         local output_w = settings.crop_size - offset * 2
         local weight = torch.Tensor(3, output_w * output_w)
         weight[1]:fill(0.29891 * 3) -- R
         weight[2]:fill(0.58661 * 3) -- G
         weight[3]:fill(0.11448 * 3) -- B
         return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
      else
         local offset = reconstruct.offset_size(model)
         local output_w = settings.crop_size - offset * 2
         local weight = torch.Tensor(1, output_w * output_w)
         weight[1]:fill(1.0)
         return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
      end
   elseif settings.loss == "l1" then
      return w2nn.L1Criterion():cuda()
   elseif settings.loss == "mse" then
      return w2nn.ClippedMSECriterion(0, 1.0):cuda()
   else
      error("unsupported loss: " .. settings.loss)
   end
end
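-- Refill the training tensors x/y with freshly transformed patches drawn from train_x
-- in random order, using the thread pool.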
local function resampling(x, y, train_x)
   local c = 1
   local shuffle = torch.randperm(#train_x)
   local nthread = torch.getnumthreads()
   if (settings.thread > 0) then
      nthread = settings.thread
   end
   torch.setnumthreads(1) -- 1
   for t = 1, #train_x do
      local input = train_x[shuffle[t]]
      g_transform_pool:addjob(
         function()
            local xy = transformer(input, false, settings.patches)
            return xy
         end,
         function(xy)
            for i = 1, #xy do
               if c <= x:size(1) then
                  x[c]:copy(xy[i][1])
                  y[c]:copy(xy[i][2])
                  c = c + 1
               else
                  break
               end
            end
         end
      )
      if t % 50 == 0 then
         collectgarbage()
         g_transform_pool:synchronize()
         xlua.progress(t, #train_x)
      end
      if c > x:size(1) then
         break
      end
   end
   g_transform_pool:synchronize()
   xlua.progress(#train_x, #train_x)
   torch.setnumthreads(nthread) -- revert
end
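-- Pick the k samples with the highest per-instance loss and return `samples` of them
-- (shuffled) as the oracle subset used by the active-learning step in train().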
local function get_oracle_data(x, y, instance_loss, k, samples)
   local index = torch.LongTensor(instance_loss:size(1))
   local dummy = torch.Tensor(instance_loss:size(1))
   torch.topk(dummy, index, instance_loss, k, 1, true)
   print("MSE of all data: " .. instance_loss:mean() .. ", MSE of oracle data: " .. dummy:mean())
   local shuffle = torch.randperm(k)
   local x_s = x:size()
   local y_s = y:size()
   x_s[1] = samples
   y_s[1] = samples
   local oracle_x = torch.Tensor(table.unpack(torch.totable(x_s)))
   local oracle_y = torch.Tensor(table.unpack(torch.totable(y_s)))
   for i = 1, samples do
      oracle_x[i]:copy(x[index[shuffle[i]]])
      oracle_y[i]:copy(y[index[shuffle[i]]])
   end
   return oracle_x, oracle_y
end
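-- Drop images that are too small to yield crop_size patches after downscaling.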
local function remove_small_image(x)
   local compression = require 'compression'
   local new_x = {}
   for i = 1, #x do
      local xe, meta, x_s
      xe = x[i]
      if type(xe) == "table" and type(xe[2]) == "table" then
         if xe[1].x and xe[1].y then
            x_s = compression.size(xe[1].y) -- y size
         else
            x_s = compression.size(xe[1])
         end
      else
         x_s = compression.size(xe)
      end
      if x_s[2] / settings.scale > settings.crop_size + 32 and
         x_s[3] / settings.scale > settings.crop_size + 32 then
         table.insert(new_x, x[i])
      end
      if i % 100 == 0 then
         collectgarbage()
      end
   end
   print(string.format("%d small images were removed", #x - #new_x))
   return new_x
end
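-- Plot the training/validation loss history (assumes the torch gnuplot package is loaded).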
local function plot(train, valid)
   gnuplot.plot({
      {'training', torch.Tensor(train), '-'},
      {'validation', torch.Tensor(valid), '-'}})
end
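-- Main training loop: load and split the data, build the model and criterion,
-- then alternate between resampling patches and running minibatch Adam epochs,
-- saving the model and a test image whenever the validation score improves.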
local function train()
   local x = torch.load(settings.images)
   if settings.method ~= "user" then
      x = remove_small_image(x)
   end
   local train_x, valid_x = split_data(x, math.max(math.floor(settings.validation_rate * #x), 1))
   local hist_train = {}
   local hist_valid = {}
   local model
   if settings.resume:len() > 0 then
      model = torch.load(settings.resume, "ascii")
   else
      if stringx.endswith(settings.model, ".lua") then
         local create_model = dofile(settings.model)
         model = create_model(srcnn, settings)
      else
         model = srcnn.create(settings.model, settings.backend, settings.color)
      end
   end
   if model.w2nn_input_size then
      if settings.crop_size ~= model.w2nn_input_size then
         io.stderr:write(string.format("warning: crop_size is replaced with %d\n",
                                       model.w2nn_input_size))
         settings.crop_size = model.w2nn_input_size
      end
   end
   if model.w2nn_gcn then
      settings.gcn = true
   else
      settings.gcn = false
   end
   dir.makepath(settings.model_dir)
   local offset = reconstruct.offset_size(model)
   transform_pool_init(reconstruct.has_resize(model), offset)
   local criterion = create_criterion(model)
   local eval_metric = w2nn.ClippedMSECriterion(0, 1):cuda()
   local adam_config = {
      xLearningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
      xLearningRateDecay = settings.learning_rate_decay,
      xInstanceLoss = (settings.oracle_rate > 0)
   }
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 1000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x,
                                        settings.validation_crops,
                                        settings.patches)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   local x = nil
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   if reconstruct.has_resize(model) then
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size / settings.scale, settings.crop_size / settings.scale)
   else
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size, settings.crop_size)
   end
   local instance_loss = nil
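   -- For each epoch: resample training patches (keeping an oracle subset of high-loss
   -- samples when settings.oracle_rate > 0), then run settings.inner_epoch passes of
   -- minibatch Adam followed by validation.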
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      if adam_config.learningRate then
         print("learning rate: " .. adam_config.learningRate)
      end
      print("## resampling")
      if instance_loss then
         -- active learning
         local oracle_k = math.min(x:size(1) * (settings.oracle_rate * (1 / (1 - settings.oracle_drop_rate))), x:size(1))
         local oracle_n = math.min(x:size(1) * settings.oracle_rate, x:size(1))
         if oracle_n > 0 then
            local oracle_x, oracle_y = get_oracle_data(x, y, instance_loss, oracle_k, oracle_n)
            resampling(x:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)),
                       y:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)), train_x)
            x:narrow(1, 1, oracle_x:size(1)):copy(oracle_x)
            y:narrow(1, 1, oracle_y:size(1)):copy(oracle_y)
            local draw_n = math.floor(math.sqrt(oracle_x:size(1)))
            if draw_n > 100 then
               draw_n = 100
            end
            image.save(path.join(settings.model_dir, "oracle_x.png"),
                       image.toDisplayTensor({
                          input = oracle_x:narrow(1, 1, draw_n * draw_n),
                          padding = 2,
                          nrow = draw_n,
                          min = 0,
                          max = 1}))
         else
            resampling(x, y, train_x)
         end
      else
         resampling(x, y, train_x)
      end
      collectgarbage()
      instance_loss = torch.Tensor(x:size(1)):zero()
      for i = 1, settings.inner_epoch do
         model:training()
         local train_score, il = minibatch_adam(model, criterion, eval_metric, x, y, adam_config)
         instance_loss:copy(il)
         print(train_score)
         model:evaluate()
         print("# validation")
         local score = validate(model, criterion, eval_metric, valid_xy, adam_config.xBatchSize)
         table.insert(hist_train, train_score.loss)
         table.insert(hist_valid, score.loss)
         if settings.plot then
            plot(hist_train, hist_valid)
         end
         local score_for_update
         if settings.update_criterion == "mse" then
            score_for_update = score.MSE
         else
            score_for_update = score.loss
         end
         if score_for_update < best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            best_score = score_for_update
            print("* model has been updated")
            if settings.save_history then
               torch.save(settings.model_file_best, model:clearState(), "ascii")
               torch.save(string.format(settings.model_file, epoch, i), model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level,
                                                                          epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale,
                                                                            epoch, i))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.%d-%d.png"):format(settings.noise_level,
                                                                                    settings.scale,
                                                                                    epoch, i))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "user" then
                  local log = path.join(settings.model_dir,
                                        ("%s_best.%d-%d.png"):format(settings.name,
                                                                     epoch, i))
                  save_test_user(model, test_image, log)
               end
            else
               torch.save(settings.model_file, model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.png"):format(settings.noise_level,
                                                                              settings.scale))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "user" then
                  local log = path.join(settings.model_dir,
                                        ("%s_best.png"):format(settings.name))
                  save_test_user(model, test_image, log)
               end
            end
         end
         print("Batch-wise PSNR: " .. score.PSNR .. ", loss: " .. score.loss .. ", MSE: " .. score.MSE .. ", best: " .. best_score)
         collectgarbage()
      end
   end
end
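-- Entry point: select the GPU, seed the RNGs and start training.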
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()