train.lua

-- penlight ('pl') must be loaded first: it provides the global `path` used below
require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'w2nn'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local compression = require 'compression'
local pairwise_transform = require 'pairwise_transform'
local image_loader = require 'image_loader'
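-- Preview helpers: run the current model over the test image and save the
-- result. reconstruct.scale()/reconstruct.image() come from lib/; they are
-- assumed to return an RGB tensor that image.save() can write directly.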
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb)
   image.save(file, up)
end
local function save_test_jpeg(model, rgb, file)
   local im, count = reconstruct.image(model, rgb)
   image.save(file, im)
end
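-- Shuffle the dataset and split it into a training set and a validation set
-- of `test_size` samples.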
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
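-- Pre-generate validation minibatches so the validation loss is measured on a
-- fixed set of crops every epoch: roughly `n` crops per image (default 4),
-- packed into batches of `batch_size`.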
local function make_validation_set(x, transformer, n, batch_size)
   n = n or 4
   local data = {}
   for i = 1, #x do
      for k = 1, math.max(n / batch_size, 1) do
         local xy = transformer(x[i], true, batch_size)
         local tx = torch.Tensor(batch_size, xy[1][1]:size(1), xy[1][1]:size(2), xy[1][1]:size(3))
         local ty = torch.Tensor(batch_size, xy[1][2]:size(1), xy[1][2]:size(2), xy[1][2]:size(3))
         for j = 1, #xy do
            tx[j]:copy(xy[j][1])
            ty[j]:copy(xy[j][2])
         end
         table.insert(data, {x = tx, y = ty})
      end
      xlua.progress(i, #x)
      collectgarbage()
   end
   return data
end
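-- Mean criterion loss over the pre-built validation batches; the caller is
-- expected to have put the model into evaluate() mode.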
local function validate(model, criterion, data)
   local loss = 0
   for i = 1, #data do
      local z = model:forward(data[i].x:cuda())
      loss = loss + criterion:forward(z, data[i].y:cuda())
      if i % 100 == 0 then
         xlua.progress(i, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return loss / #data
end
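-- Build the training criterion. For RGB models each output pixel is weighted
-- by its BT.601 luma coefficient (R 0.29891, G 0.58661, B 0.11448, scaled by 3
-- so the per-channel weights average to 1) under w2nn's weighted Huber loss;
-- Y-channel models fall back to plain MSE.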
local function create_criterion(model)
   if reconstruct.is_rgb(model) then
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(3, output_w * output_w)
      weight[1]:fill(0.29891 * 3) -- R
      weight[2]:fill(0.58661 * 3) -- G
      weight[3]:fill(0.11448 * 3) -- B
      return w2nn.WeightedHuberCriterion(weight, 0.1):cuda()
   else
      return nn.MSECriterion():cuda()
   end
end
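-- Turn one compressed training sample into `n` (input, target) patch pairs,
-- i.e. { {x1, y1}, {x2, y2}, ... }, which is the format make_validation_set
-- and minibatch_adam consume. Validation batches disable the stochastic
-- augmentations (color noise, overlay, active cropping) so the score stays
-- comparable across epochs.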
local function transformer(x, is_validation, n, offset)
   x = compression.decompress(x)
   n = n or settings.batch_size
   if is_validation == nil then is_validation = false end
   local color_noise = nil
   local overlay = nil
   local active_cropping_rate = nil
   local active_cropping_tries = nil
   if is_validation then
      active_cropping_rate = 0.0
      active_cropping_tries = 0
      color_noise = false
      overlay = false
   else
      active_cropping_rate = settings.active_cropping_rate
      active_cropping_tries = settings.active_cropping_tries
      color_noise = settings.color_noise
      overlay = settings.overlay
   end
   if settings.method == "scale" then
      return pairwise_transform.scale(x,
         settings.scale,
         settings.crop_size, offset,
         n,
         {
            color_noise = color_noise,
            overlay = overlay,
            random_half = settings.random_half,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            rgb = (settings.color == "rgb")
         })
   elseif settings.method == "noise" then
      return pairwise_transform.jpeg(x,
         settings.category,
         settings.noise_level,
         settings.crop_size, offset,
         n,
         {
            color_noise = color_noise,
            overlay = overlay,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            random_half = settings.random_half,
            jpeg_sampling_factors = settings.jpeg_sampling_factors,
            rgb = (settings.color == "rgb")
         })
   end
end
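-- Main training loop: build the model, split the data, then alternate
-- minibatch Adam updates with validation. The model and a preview image are
-- saved whenever the validation score improves; when the score fails to
-- improve for more than five consecutive epochs, the learning rate is
-- multiplied by 0.9.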
local function train()
   local model = srcnn.create(settings.method, settings.backend, settings.color)
   local offset = reconstruct.offset_size(model)
   local pairwise_func = function(x, is_validation, n)
      return transformer(x, is_validation, n, offset)
   end
   local criterion = create_criterion(model)
   local x = torch.load(settings.images)
   local lrd_count = 0
   local train_x, valid_x = split_data(x, math.floor(settings.validation_ratio * #x))
   local adam_config = {
      learningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
   }
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 100000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x, pairwise_func,
                                        settings.validation_crops,
                                        settings.batch_size)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      print(minibatch_adam(model, criterion, train_x, adam_config,
                           pairwise_func,
                           {ch, settings.crop_size, settings.crop_size},
                           {ch, settings.crop_size - offset * 2, settings.crop_size - offset * 2}
      ))
      model:evaluate()
      print("# validation")
      local score = validate(model, criterion, valid_xy)
      if score < best_score then
         local test_image = image_loader.load_float(settings.test) -- reload
         lrd_count = 0
         best_score = score
         print("* update best model")
         torch.save(settings.model_file, model)
         if settings.method == "noise" then
            local log = path.join(settings.model_dir,
                                  ("noise%d_best.png"):format(settings.noise_level))
            save_test_jpeg(model, test_image, log)
         elseif settings.method == "scale" then
            local log = path.join(settings.model_dir,
                                  ("scale%.1f_best.png"):format(settings.scale))
            save_test_scale(model, test_image, log)
         end
      else
         lrd_count = lrd_count + 1
         if lrd_count > 5 then
            lrd_count = 0
            adam_config.learningRate = adam_config.learningRate * 0.9
            print("* learning rate decay: " .. adam_config.learningRate)
         end
      end
      print("current: " .. score .. ", best: " .. best_score)
      collectgarbage()
   end
end
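-- Entry point: seed the CPU and GPU RNGs for reproducibility, print the
-- parsed settings, and start training.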
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()