-- waifu2x.lua
-- Preamble: library path setup and shared module loading.
require 'pl'
-- Absolute path of this script (strip the "@" prefix torch/lua puts on sources).
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
-- Make the project's ./lib directory searchable before any project require.
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'sys'
require 'w2nn'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local image_loader = require 'image_loader'
local alpha_util = require 'alpha_util'
-- All CPU tensors default to float32 (models and image buffers expect it).
torch.setdefaulttensortype('torch.FloatTensor')
  11. local function convert_image(opt)
  12. local x, alpha = image_loader.load_float(opt.i)
  13. local new_x = nil
  14. local t = sys.clock()
  15. local scale_f, image_f
  16. if opt.tta == 1 then
  17. scale_f = reconstruct.scale_tta
  18. image_f = reconstruct.image_tta
  19. else
  20. scale_f = reconstruct.scale
  21. image_f = reconstruct.image
  22. end
  23. if opt.o == "(auto)" then
  24. local name = path.basename(opt.i)
  25. local e = path.extension(name)
  26. local base = name:sub(0, name:len() - e:len())
  27. opt.o = path.join(path.dirname(opt.i), string.format("%s_%s.png", base, opt.m))
  28. end
  29. if opt.m == "noise" then
  30. local model_path = path.join(opt.model_dir, ("noise%d_model.t7"):format(opt.noise_level))
  31. local model = torch.load(model_path, "ascii")
  32. if not model then
  33. error("Load Error: " .. model_path)
  34. end
  35. new_x = image_f(model, x, opt.crop_size)
  36. new_x = alpha_util.composite(new_x, alpha)
  37. elseif opt.m == "scale" then
  38. local model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
  39. local model = torch.load(model_path, "ascii")
  40. if not model then
  41. error("Load Error: " .. model_path)
  42. end
  43. x = alpha_util.make_border(x, alpha, reconstruct.offset_size(model))
  44. new_x = scale_f(model, opt.scale, x, opt.crop_size)
  45. new_x = alpha_util.composite(new_x, alpha, model)
  46. elseif opt.m == "noise_scale" then
  47. local noise_model_path = path.join(opt.model_dir, ("noise%d_model.t7"):format(opt.noise_level))
  48. local noise_model = torch.load(noise_model_path, "ascii")
  49. local scale_model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
  50. local scale_model = torch.load(scale_model_path, "ascii")
  51. if not noise_model then
  52. error("Load Error: " .. noise_model_path)
  53. end
  54. if not scale_model then
  55. error("Load Error: " .. scale_model_path)
  56. end
  57. x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
  58. x = image_f(noise_model, x, opt.crop_size)
  59. new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
  60. new_x = alpha_util.composite(new_x, alpha, scale_model)
  61. else
  62. error("undefined method:" .. opt.method)
  63. end
  64. image_loader.save_png(opt.o, new_x, opt.depth)
  65. print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
  66. end
  67. local function convert_frames(opt)
  68. local model_path, noise1_model, noise2_model, scale_model
  69. local scale_f, image_f
  70. if opt.tta == 1 then
  71. scale_f = reconstruct.scale_tta
  72. image_f = reconstruct.image_tta
  73. else
  74. scale_f = reconstruct.scale
  75. image_f = reconstruct.image
  76. end
  77. if opt.m == "scale" then
  78. model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
  79. scale_model = torch.load(model_path, "ascii")
  80. if not scale_model then
  81. error("Load Error: " .. model_path)
  82. end
  83. elseif opt.m == "noise" and opt.noise_level == 1 then
  84. model_path = path.join(opt.model_dir, "noise1_model.t7")
  85. noise1_model = torch.load(model_path, "ascii")
  86. if not noise1_model then
  87. error("Load Error: " .. model_path)
  88. end
  89. elseif opt.m == "noise" and opt.noise_level == 2 then
  90. model_path = path.join(opt.model_dir, "noise2_model.t7")
  91. noise2_model = torch.load(model_path, "ascii")
  92. if not noise2_model then
  93. error("Load Error: " .. model_path)
  94. end
  95. elseif opt.m == "noise_scale" then
  96. model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
  97. scale_model = torch.load(model_path, "ascii")
  98. if not scale_model then
  99. error("Load Error: " .. model_path)
  100. end
  101. if opt.noise_level == 1 then
  102. model_path = path.join(opt.model_dir, "noise1_model.t7")
  103. noise1_model = torch.load(model_path, "ascii")
  104. if not noise1_model then
  105. error("Load Error: " .. model_path)
  106. end
  107. elseif opt.noise_level == 2 then
  108. model_path = path.join(opt.model_dir, "noise2_model.t7")
  109. noise2_model = torch.load(model_path, "ascii")
  110. if not noise2_model then
  111. error("Load Error: " .. model_path)
  112. end
  113. end
  114. end
  115. local fp = io.open(opt.l)
  116. if not fp then
  117. error("Open Error: " .. opt.l)
  118. end
  119. local count = 0
  120. local lines = {}
  121. for line in fp:lines() do
  122. table.insert(lines, line)
  123. end
  124. fp:close()
  125. for i = 1, #lines do
  126. if opt.resume == 0 or path.exists(string.format(opt.o, i)) == false then
  127. local x, alpha = image_loader.load_float(lines[i])
  128. local new_x = nil
  129. if opt.m == "noise" and opt.noise_level == 1 then
  130. new_x = image_f(noise1_model, x, opt.crop_size)
  131. new_x = alpha_util.composite(new_x, alpha)
  132. elseif opt.m == "noise" and opt.noise_level == 2 then
  133. new_x = image_f(noise2_model, x, opt.crop_size)
  134. new_x = alpha_util.composite(new_x, alpha)
  135. elseif opt.m == "scale" then
  136. x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
  137. new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
  138. new_x = alpha_util.composite(new_x, alpha, scale_model)
  139. elseif opt.m == "noise_scale" and opt.noise_level == 1 then
  140. x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
  141. x = image_f(noise1_model, x, opt.crop_size)
  142. new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
  143. new_x = alpha_util.composite(new_x, alpha, scale_model)
  144. elseif opt.m == "noise_scale" and opt.noise_level == 2 then
  145. x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
  146. x = image_f(noise2_model, x, opt.crop_size)
  147. new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
  148. new_x = alpha_util.composite(new_x, alpha, scale_model)
  149. else
  150. error("undefined method:" .. opt.method)
  151. end
  152. local output = nil
  153. if opt.o == "(auto)" then
  154. local name = path.basename(lines[i])
  155. local e = path.extension(name)
  156. local base = name:sub(0, name:len() - e:len())
  157. output = path.join(path.dirname(opt.i), string.format("%s(%s).png", base, opt.m))
  158. else
  159. output = string.format(opt.o, i)
  160. end
  161. image_loader.save_png(output, new_x, opt.depth)
  162. xlua.progress(i, #lines)
  163. if i % 10 == 0 then
  164. collectgarbage()
  165. end
  166. else
  167. xlua.progress(i, #lines)
  168. end
  169. end
  170. end
  171. local function waifu2x()
  172. local cmd = torch.CmdLine()
  173. cmd:text()
  174. cmd:text("waifu2x")
  175. cmd:text("Options:")
  176. cmd:option("-i", "images/miku_small.png", 'path to input image')
  177. cmd:option("-l", "", 'path to image-list.txt')
  178. cmd:option("-scale", 2, 'scale factor')
  179. cmd:option("-o", "(auto)", 'path to output file')
  180. cmd:option("-depth", 8, 'bit-depth of the output image (8|16)')
  181. cmd:option("-model_dir", "./models/anime_style_art_rgb", 'path to model directory')
  182. cmd:option("-m", "noise_scale", 'method (noise|scale|noise_scale)')
  183. cmd:option("-noise_level", 1, '(1|2)')
  184. cmd:option("-crop_size", 128, 'patch size per process')
  185. cmd:option("-resume", 0, "skip existing files (0|1)")
  186. cmd:option("-thread", -1, "number of CPU threads")
  187. cmd:option("-tta", 0, '8x slower and slightly high quality (0|1)')
  188. local opt = cmd:parse(arg)
  189. if opt.thread > 0 then
  190. torch.setnumthreads(opt.thread)
  191. end
  192. if cudnn then
  193. cudnn.fastest = true
  194. cudnn.benchmark = false
  195. end
  196. if string.len(opt.l) == 0 then
  197. convert_image(opt)
  198. else
  199. convert_frames(opt)
  200. end
  201. end
  202. waifu2x()