-- Training script (Torch7/CUDA): builds or resumes an SRCNN-style model,
-- generates (input, target) patch pairs from a compressed image corpus,
-- trains with minibatch Adam, and snapshots the best model by validation MSE.
-- Depends on the project's lib/ modules, which are put on package.path below.
require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
-- Make the project's lib/ directory requirable relative to this file's location.
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'optim'
require 'xlua'
require 'w2nn'
local settings = require 'settings'
local srcnn = require 'srcnn'
local minibatch_adam = require 'minibatch_adam'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local compression = require 'compression'
local pairwise_transform = require 'pairwise_transform'
local image_loader = require 'image_loader'

-- Save an upscaled preview of `rgb` produced by `model` to `file`.
local function save_test_scale(model, rgb, file)
   local up = reconstruct.scale(model, settings.scale, rgb)
   image.save(file, up)
end
-- Save a reconstructed (denoised) preview of `rgb` produced by `model` to `file`.
local function save_test_jpeg(model, rgb, file)
   -- reconstruct.image returns a second value; it is not needed here.
   local im = reconstruct.image(model, rgb)
   image.save(file, im)
end
-- Randomly split `x` into a training set of (#x - test_size) items and a
-- validation set of `test_size` items. Returns train_x, valid_x.
local function split_data(x, test_size)
   local index = torch.randperm(#x)
   local train_size = #x - test_size
   local train_x = {}
   local valid_x = {}
   for i = 1, train_size do
      train_x[i] = x[index[i]]
   end
   for i = 1, test_size do
      valid_x[i] = x[index[train_size + i]]
   end
   return train_x, valid_x
end
-- Build a shuffled list of {x=input, y=target} validation patches by running
-- `transformer` (in validation mode) over each image in `x`.
-- `n` is the requested number of crops per image (default 4); at most 16
-- patches are generated per transformer call.
local function make_validation_set(x, transformer, n, patches)
   n = n or 4
   local validation_patches = math.min(16, patches or 16)
   local data = {}
   for i = 1, #x do
      -- note: the loop bound may be fractional; Lua runs while k <= bound
      for k = 1, math.max(n / validation_patches, 1) do
         local xy = transformer(x[i], true, validation_patches)
         for j = 1, #xy do
            table.insert(data, {x = xy[j][1], y = xy[j][2]})
         end
      end
      xlua.progress(i, #x)
      collectgarbage()
   end
   -- shuffle so validation batches are not grouped by source image
   local new_data = {}
   local perm = torch.randperm(#data)
   for i = 1, perm:size(1) do
      new_data[i] = data[perm[i]]
   end
   data = new_data
   return data
end
-- Evaluate `model` over `data` (list of {x=..., y=...}) in fixed-size batches.
-- A trailing partial batch is skipped. Returns {loss=..., MSE=..., PSNR=...},
-- averaged per batch; PSNR assumes pixel values in [0, 1].
local function validate(model, criterion, eval_metric, data, batch_size)
   local loss = 0
   local mse = 0
   local loss_count = 0
   -- staging buffers on the CPU; copied to CUDA tensors once per batch
   local inputs_tmp = torch.Tensor(batch_size,
                                   data[1].x:size(1),
                                   data[1].x:size(2),
                                   data[1].x:size(3)):zero()
   local targets_tmp = torch.Tensor(batch_size,
                                    data[1].y:size(1),
                                    data[1].y:size(2),
                                    data[1].y:size(3)):zero()
   local inputs = inputs_tmp:clone():cuda()
   local targets = targets_tmp:clone():cuda()
   for t = 1, #data, batch_size do
      if t + batch_size - 1 > #data then
         break
      end
      for i = 1, batch_size do
         inputs_tmp[i]:copy(data[t + i - 1].x)
         targets_tmp[i]:copy(data[t + i - 1].y)
      end
      inputs:copy(inputs_tmp)
      targets:copy(targets_tmp)
      local z = model:forward(inputs)
      loss = loss + criterion:forward(z, targets)
      mse = mse + eval_metric:forward(z, targets)
      loss_count = loss_count + 1
      if loss_count % 10 == 0 then
         xlua.progress(t, #data)
         collectgarbage()
      end
   end
   xlua.progress(#data, #data)
   return {loss = loss / loss_count, MSE = mse / loss_count, PSNR = 10 * math.log10(1 / (mse / loss_count))}
end
-- Build the training criterion for `model`.
-- For RGB models with loss == "y", channels are weighted by the BT.601 luma
-- coefficients (scaled by 3 so the weights sum to the RGB-equivalent total).
local function create_criterion(model, loss)
   if reconstruct.is_rgb(model) then
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(3, output_w * output_w)
      if loss == "y" then
         weight[1]:fill(0.29891 * 3) -- R
         weight[2]:fill(0.58661 * 3) -- G
         weight[3]:fill(0.11448 * 3) -- B
      else
         weight:fill(1)
      end
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   else
      local offset = reconstruct.offset_size(model)
      local output_w = settings.crop_size - offset * 2
      local weight = torch.Tensor(1, output_w * output_w)
      weight[1]:fill(1.0)
      return w2nn.ClippedWeightedHuberCriterion(weight, 0.1, {0.0, 1.0}):cuda()
   end
end
-- Decompress one training sample and produce up to `n` (input, target) patch
-- pairs according to settings.method ("scale" | "noise" | "noise_scale").
-- `x` is either compressed data or {compressed, meta-table}.
-- In validation mode color-noise/overlay augmentation is disabled.
local function transformer(model, x, is_validation, n, offset)
   local meta = {data = {}}
   if type(x) == "table" and type(x[2]) == "table" then
      meta = x[2]
      x = compression.decompress(x[1])
   else
      x = compression.decompress(x)
   end
   n = n or settings.patches
   if is_validation == nil then is_validation = false end
   -- Active cropping is used for both training and validation; only the
   -- stochastic color augmentations are switched off for validation.
   local active_cropping_rate = settings.active_cropping_rate
   local active_cropping_tries = settings.active_cropping_tries
   local random_color_noise_rate = 0.0
   local random_overlay_rate = 0.0
   if not is_validation then
      random_color_noise_rate = settings.random_color_noise_rate
      random_overlay_rate = settings.random_overlay_rate
   end
   if settings.method == "scale" then
      local conf = tablex.update({
            downsampling_filters = settings.downsampling_filters,
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            rgb = (settings.color == "rgb"),
            x_upsampling = not reconstruct.has_resize(model),
            resize_blur_min = settings.resize_blur_min,
            resize_blur_max = settings.resize_blur_max}, meta)
      return pairwise_transform.scale(x,
                                      settings.scale,
                                      settings.crop_size, offset,
                                      n, conf)
   elseif settings.method == "noise" then
      local conf = tablex.update({
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            nr_rate = settings.nr_rate,
            rgb = (settings.color == "rgb")}, meta)
      return pairwise_transform.jpeg(x,
                                     settings.style,
                                     settings.noise_level,
                                     settings.crop_size, offset,
                                     n, conf)
   elseif settings.method == "noise_scale" then
      local conf = tablex.update({
            downsampling_filters = settings.downsampling_filters,
            random_half_rate = settings.random_half_rate,
            random_color_noise_rate = random_color_noise_rate,
            random_overlay_rate = random_overlay_rate,
            random_unsharp_mask_rate = settings.random_unsharp_mask_rate,
            max_size = settings.max_size,
            jpeg_chroma_subsampling_rate = settings.jpeg_chroma_subsampling_rate,
            nr_rate = settings.nr_rate,
            active_cropping_rate = active_cropping_rate,
            active_cropping_tries = active_cropping_tries,
            rgb = (settings.color == "rgb"),
            x_upsampling = not reconstruct.has_resize(model),
            resize_blur_min = settings.resize_blur_min,
            resize_blur_max = settings.resize_blur_max}, meta)
      return pairwise_transform.jpeg_scale(x,
                                           settings.scale,
                                           settings.style,
                                           settings.noise_level,
                                           settings.crop_size, offset,
                                           n, conf)
   end
end
-- Refill the preallocated input/target tensors `x`/`y` with freshly
-- transformed patches drawn from `train_x` in a random order, stopping when
-- the tensors are full. `input_size`/`target_size` are accepted for
-- call-compatibility but unused.
local function resampling(x, y, train_x, transformer, input_size, target_size)
   local c = 1
   local shuffle = torch.randperm(#train_x)
   for t = 1, #train_x do
      xlua.progress(t, #train_x)
      local xy = transformer(train_x[shuffle[t]], false, settings.patches)
      for i = 1, #xy do
         x[c]:copy(xy[i][1])
         y[c]:copy(xy[i][2])
         c = c + 1
         if c > x:size(1) then
            break
         end
      end
      if c > x:size(1) then
         break
      end
      if t % 50 == 0 then
         collectgarbage()
      end
   end
   xlua.progress(#train_x, #train_x)
end
-- Active-learning sampler: pick the `k` patches with the highest per-instance
-- loss, then randomly keep `samples` of them. Returns (oracle_x, oracle_y).
local function get_oracle_data(x, y, instance_loss, k, samples)
   local index = torch.LongTensor(instance_loss:size(1))
   local dummy = torch.Tensor(instance_loss:size(1))
   -- topk with largest=true: `dummy` receives the k largest losses, `index`
   -- their positions (remaining entries of the buffers are unused)
   torch.topk(dummy, index, instance_loss, k, 1, true)
   print("MSE of all data: " ..instance_loss:mean() .. ", MSE of oracle data: " .. dummy:mean())
   local shuffle = torch.randperm(k)
   local x_s = x:size()
   local y_s = y:size()
   x_s[1] = samples
   y_s[1] = samples
   local oracle_x = torch.Tensor(table.unpack(torch.totable(x_s)))
   local oracle_y = torch.Tensor(table.unpack(torch.totable(y_s)))
   for i = 1, samples do
      oracle_x[i]:copy(x[index[shuffle[i]]])
      oracle_y[i]:copy(y[index[shuffle[i]]])
   end
   return oracle_x, oracle_y
end
-- Drop corpus entries too small to yield a crop_size patch after downscaling
-- (a 32px safety margin is kept). Returns the filtered list.
local function remove_small_image(x)
   local new_x = {}
   for i = 1, #x do
      local xe, x_s
      xe = x[i]
      if type(xe) == "table" and type(xe[2]) == "table" then
         x_s = compression.size(xe[1])
      else
         x_s = compression.size(xe)
      end
      -- x_s[2]/x_s[3] are presumably height/width of the compressed image
      if x_s[2] / settings.scale > settings.crop_size + 32 and
      x_s[3] / settings.scale > settings.crop_size + 32 then
         table.insert(new_x, x[i])
      end
      if i % 100 == 0 then
         collectgarbage()
      end
   end
   print(string.format("%d small images are removed", #x - #new_x))
   return new_x
end
-- Plot training/validation loss history with gnuplot.
local function plot(train_loss, valid_loss)
   gnuplot.plot({
         {'training', torch.Tensor(train_loss), '-'},
         {'validation', torch.Tensor(valid_loss), '-'}})
end
-- Main training loop. Resumes from settings.resume when given, otherwise
-- creates a new model; resamples the patch pool each epoch (optionally with
-- oracle/active-learning reuse of high-loss patches), runs inner Adam epochs,
-- validates, and saves the model (plus a preview image) whenever the
-- validation MSE improves.
local function train()
   local hist_train = {}
   local hist_valid = {}
   local model
   if settings.resume:len() > 0 then
      model = torch.load(settings.resume, "ascii")
   else
      model = srcnn.create(settings.model, settings.backend, settings.color)
   end
   local offset = reconstruct.offset_size(model)
   local pairwise_func = function(x, is_validation, n)
      return transformer(model, x, is_validation, n, offset)
   end
   local criterion = create_criterion(model, settings.loss)
   local eval_metric = w2nn.ClippedMSECriterion(0, 1):cuda()
   local x = remove_small_image(torch.load(settings.images))
   local train_x, valid_x = split_data(x, math.max(math.floor(settings.validation_rate * #x), 1))
   -- x-prefixed keys are consumed by the project's minibatch_adam wrapper
   local adam_config = {
      xLearningRate = settings.learning_rate,
      xBatchSize = settings.batch_size,
      xLearningRateDecay = settings.learning_rate_decay
   }
   local ch = nil
   if settings.color == "y" then
      ch = 1
   elseif settings.color == "rgb" then
      ch = 3
   end
   local best_score = 1000.0
   print("# make validation-set")
   local valid_xy = make_validation_set(valid_x, pairwise_func,
                                        settings.validation_crops,
                                        settings.patches)
   valid_x = nil
   collectgarbage()
   model:cuda()
   print("load .. " .. #train_x)
   -- preallocate the full patch pool: y is flattened targets, x is inputs
   -- (smaller spatial size when the model performs its own upsampling)
   local x = nil
   local y = torch.Tensor(settings.patches * #train_x,
                          ch * (settings.crop_size - offset * 2) * (settings.crop_size - offset * 2)):zero()
   if reconstruct.has_resize(model) then
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size / settings.scale, settings.crop_size / settings.scale)
   else
      x = torch.Tensor(settings.patches * #train_x,
                       ch, settings.crop_size, settings.crop_size)
   end
   local instance_loss = nil
   for epoch = 1, settings.epoch do
      model:training()
      print("# " .. epoch)
      -- learningRate may be populated by the optimizer after the first epoch
      if adam_config.learningRate then
         print("learning rate: " .. adam_config.learningRate)
      end
      print("## resampling")
      if instance_loss then
         -- active learning: keep the highest-loss patches from last epoch and
         -- only resample the remainder of the pool
         local oracle_k = math.min(x:size(1) * (settings.oracle_rate * (1 / (1 - settings.oracle_drop_rate))), x:size(1))
         local oracle_n = math.min(x:size(1) * settings.oracle_rate, x:size(1))
         if oracle_n > 0 then
            local oracle_x, oracle_y = get_oracle_data(x, y, instance_loss, oracle_k, oracle_n)
            resampling(x:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)),
                       y:narrow(1, oracle_x:size(1) + 1, x:size(1) - oracle_x:size(1)), train_x, pairwise_func)
            x:narrow(1, 1, oracle_x:size(1)):copy(oracle_x)
            y:narrow(1, 1, oracle_y:size(1)):copy(oracle_y)
            -- side length of the square oracle-patch preview grid
            -- (fixed: math.sqrt takes a single argument; the original passed
            -- a spurious second argument 0.5, which Lua silently ignored)
            local draw_n = math.floor(math.sqrt(oracle_x:size(1)))
            if draw_n > 100 then
               draw_n = 100
            end
            image.save(path.join(settings.model_dir, "oracle_x.png"),
                       image.toDisplayTensor({
                             input = oracle_x:narrow(1, 1, draw_n * draw_n),
                             padding = 2,
                             nrow = draw_n,
                             min = 0,
                             max = 1}))
         else
            resampling(x, y, train_x, pairwise_func)
         end
      else
         resampling(x, y, train_x, pairwise_func)
      end
      collectgarbage()
      instance_loss = torch.Tensor(x:size(1)):zero()
      for i = 1, settings.inner_epoch do
         model:training()
         local train_score, il = minibatch_adam(model, criterion, eval_metric, x, y, adam_config)
         instance_loss:copy(il)
         print(train_score)
         model:evaluate()
         print("# validation")
         local score = validate(model, criterion, eval_metric, valid_xy, adam_config.xBatchSize)
         table.insert(hist_train, train_score.loss)
         table.insert(hist_valid, score.loss)
         if settings.plot then
            plot(hist_train, hist_valid)
         end
         if score.MSE < best_score then
            local test_image = image_loader.load_float(settings.test) -- reload
            best_score = score.MSE
            print("* Best model is updated")
            if settings.save_history then
               -- keep every best snapshot, named by epoch/inner-epoch
               torch.save(settings.model_file_best, model:clearState(), "ascii")
               torch.save(string.format(settings.model_file, epoch, i), model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.%d-%d.png"):format(settings.noise_level,
                                                                          epoch, i))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.%d-%d.png"):format(settings.scale,
                                                                            epoch, i))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.%d-%d.png"):format(settings.noise_level,
                                                                                    settings.scale,
                                                                                    epoch, i))
                  save_test_scale(model, test_image, log)
               end
            else
               -- single rolling best-model file
               torch.save(settings.model_file, model:clearState(), "ascii")
               if settings.method == "noise" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_best.png"):format(settings.noise_level))
                  save_test_jpeg(model, test_image, log)
               elseif settings.method == "scale" then
                  local log = path.join(settings.model_dir,
                                        ("scale%.1f_best.png"):format(settings.scale))
                  save_test_scale(model, test_image, log)
               elseif settings.method == "noise_scale" then
                  local log = path.join(settings.model_dir,
                                        ("noise%d_scale%.1f_best.png"):format(settings.noise_level,
                                                                              settings.scale))
                  save_test_scale(model, test_image, log)
               end
            end
         end
         print("Batch-wise PSNR: " .. score.PSNR .. ", loss: " .. score.loss .. ", MSE: " .. score.MSE .. ", Minimum MSE: " .. best_score)
         collectgarbage()
      end
   end
end
if settings.gpu > 0 then
   cutorch.setDevice(settings.gpu)
end
torch.manualSeed(settings.seed)
cutorch.manualSeed(settings.seed)
print(settings)
train()