waifu2x.lua
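-- Example invocations (assuming the standard Torch launcher `th`; the paths
-- below are illustrative only):
--   th waifu2x.lua -i input.png -m noise_scale -noise_level 1 -scale 2
--   th waifu2x.lua -l image-list.txt -m scale -resume 1 -o "out/%s.png"
-- With -o "(auto)" (the default) the output is written as
-- "<basename>_<method>.png" next to the input image.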
require 'pl'
local __FILE__ = (function() return string.gsub(debug.getinfo(2, 'S').source, "^@", "") end)()
package.path = path.join(path.dirname(__FILE__), "lib", "?.lua;") .. package.path
require 'sys'
require 'w2nn'
local iproc = require 'iproc'
local reconstruct = require 'reconstruct'
local image_loader = require 'image_loader'
local alpha_util = require 'alpha_util'

torch.setdefaulttensortype('torch.FloatTensor')
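-- Build the output path for input `src`. With -o "(auto)", the result is
-- "<basename>_<method>.png" in the source directory; otherwise opt.o is used
-- as a format string where %s expands to the basename and a %d-style
-- specifier expands to the 1-based index `no` (used for image lists).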
local function format_output(opt, src, no)
   no = no or 1
   local name = path.basename(src)
   local e = path.extension(name)
   local basename = name:sub(0, name:len() - e:len())
   if opt.o == "(auto)" then
      return path.join(path.dirname(src), string.format("%s_%s.png", basename, opt.m))
   else
      local basename_pos = opt.o:find("%%s")
      local no_pos = opt.o:find("%%%d*d")
      if basename_pos ~= nil and no_pos ~= nil then
         if basename_pos < no_pos then
            return string.format(opt.o, basename, no)
         else
            return string.format(opt.o, no, basename)
         end
      elseif basename_pos ~= nil then
         return string.format(opt.o, basename)
      elseif no_pos ~= nil then
         return string.format(opt.o, no)
      else
         return opt.o
      end
   end
end
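-- Convert a single image (-i): run the selected method (noise, scale or
-- noise_scale), re-composite the alpha channel and write the result as PNG.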
local function convert_image(opt)
   local x, meta = image_loader.load_float(opt.i)
   local alpha = meta.alpha
   local new_x = nil
   local scale_f, image_f
   if opt.tta == 1 then
      scale_f = reconstruct.scale_tta
      image_f = reconstruct.image_tta
   else
      scale_f = reconstruct.scale
      image_f = reconstruct.image
   end
   opt.o = format_output(opt, opt.i)
   if opt.m == "noise" then
      local model_path = path.join(opt.model_dir, ("noise%d_model.t7"):format(opt.noise_level))
      local model = torch.load(model_path, "ascii")
      if not model then
         error("Load Error: " .. model_path)
      end
      local t = sys.clock()
      new_x = image_f(model, x, opt.crop_size)
      new_x = alpha_util.composite(new_x, alpha)
      print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
   elseif opt.m == "scale" then
      local model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
      local model = torch.load(model_path, "ascii")
      if not model then
         error("Load Error: " .. model_path)
      end
      local t = sys.clock()
      x = alpha_util.make_border(x, alpha, reconstruct.offset_size(model))
      new_x = scale_f(model, opt.scale, x, opt.crop_size, opt.upsampling_filter)
      new_x = alpha_util.composite(new_x, alpha, model)
      print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
   elseif opt.m == "noise_scale" then
      local model_path = path.join(opt.model_dir, ("noise%d_scale%.1fx_model.t7"):format(opt.noise_level, opt.scale))
      if path.exists(model_path) then
         -- a fused noise+scale model is available; the plain scale model is
         -- still needed for the alpha border offset and compositing
         local scale_model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
         local scale_model = torch.load(scale_model_path, "ascii")
         local model = torch.load(model_path, "ascii")
         if not model then
            error("Load Error: " .. model_path)
         end
         if not scale_model then
            error("Load Error: " .. scale_model_path)
         end
         local t = sys.clock()
         x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
         new_x = scale_f(model, opt.scale, x, opt.crop_size, opt.upsampling_filter)
         new_x = alpha_util.composite(new_x, alpha, scale_model)
         print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
      else
         -- no fused model: denoise first, then upscale
         local noise_model_path = path.join(opt.model_dir, ("noise%d_model.t7"):format(opt.noise_level))
         local noise_model = torch.load(noise_model_path, "ascii")
         local scale_model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
         local scale_model = torch.load(scale_model_path, "ascii")
         if not noise_model then
            error("Load Error: " .. noise_model_path)
         end
         if not scale_model then
            error("Load Error: " .. scale_model_path)
         end
         local t = sys.clock()
         x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
         x = image_f(noise_model, x, opt.crop_size)
         new_x = scale_f(scale_model, opt.scale, x, opt.crop_size, opt.upsampling_filter)
         new_x = alpha_util.composite(new_x, alpha, scale_model)
         print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
      end
   else
      error("undefined method: " .. opt.m)
   end
   image_loader.save_png(opt.o, new_x, tablex.update({depth = opt.depth, inplace = true}, meta))
end
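-- Convert every image listed in the file given by -l (one path per line).
-- Models are loaded once up front; with -resume 1, images whose output file
-- already exists are skipped.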
local function convert_frames(opt)
   local model_path, scale_model
   local noise_scale_model = {}
   local noise_model = {}
   local scale_f, image_f
   if opt.tta == 1 then
      scale_f = reconstruct.scale_tta
      image_f = reconstruct.image_tta
   else
      scale_f = reconstruct.scale
      image_f = reconstruct.image
   end
   if opt.m == "scale" then
      model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
      scale_model = torch.load(model_path, "ascii")
      if not scale_model then
         error("Load Error: " .. model_path)
      end
   elseif opt.m == "noise" then
      model_path = path.join(opt.model_dir, string.format("noise%d_model.t7", opt.noise_level))
      noise_model[opt.noise_level] = torch.load(model_path, "ascii")
      if not noise_model[opt.noise_level] then
         error("Load Error: " .. model_path)
      end
   elseif opt.m == "noise_scale" then
      local model_path = path.join(opt.model_dir, ("noise%d_scale%.1fx_model.t7"):format(opt.noise_level, opt.scale))
      if path.exists(model_path) then
         noise_scale_model[opt.noise_level] = torch.load(model_path, "ascii")
         if not noise_scale_model[opt.noise_level] then
            error("Load Error: " .. model_path)
         end
         model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
         scale_model = torch.load(model_path, "ascii")
         if not scale_model then
            error("Load Error: " .. model_path)
         end
      else
         model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
         scale_model = torch.load(model_path, "ascii")
         if not scale_model then
            error("Load Error: " .. model_path)
         end
         model_path = path.join(opt.model_dir, string.format("noise%d_model.t7", opt.noise_level))
         noise_model[opt.noise_level] = torch.load(model_path, "ascii")
         if not noise_model[opt.noise_level] then
            error("Load Error: " .. model_path)
         end
      end
   end
   local fp = io.open(opt.l)
   if not fp then
      error("Open Error: " .. opt.l)
   end
   local count = 0
   local lines = {}
   for line in fp:lines() do
      table.insert(lines, line)
   end
   fp:close()
   for i = 1, #lines do
      local output = format_output(opt, lines[i], i)
      if opt.resume == 0 or path.exists(output) == false then
         local x, meta = image_loader.load_float(lines[i])
         local alpha = meta.alpha
         local new_x = nil
         if opt.m == "noise" then
            new_x = image_f(noise_model[opt.noise_level], x, opt.crop_size)
            new_x = alpha_util.composite(new_x, alpha)
         elseif opt.m == "scale" then
            x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
            new_x = scale_f(scale_model, opt.scale, x, opt.crop_size, opt.upsampling_filter)
            new_x = alpha_util.composite(new_x, alpha, scale_model)
         elseif opt.m == "noise_scale" then
            x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
            if noise_scale_model[opt.noise_level] then
               new_x = scale_f(noise_scale_model[opt.noise_level], opt.scale, x, opt.crop_size, opt.upsampling_filter)
            else
               x = image_f(noise_model[opt.noise_level], x, opt.crop_size)
               new_x = scale_f(scale_model, opt.scale, x, opt.crop_size, opt.upsampling_filter)
            end
            new_x = alpha_util.composite(new_x, alpha, scale_model)
         else
            error("undefined method: " .. opt.m)
         end
         image_loader.save_png(output, new_x,
                               tablex.update({depth = opt.depth, inplace = true}, meta))
         xlua.progress(i, #lines)
         if i % 10 == 0 then
            collectgarbage()
         end
      else
         xlua.progress(i, #lines)
      end
   end
end
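-- Entry point: parse the command line and dispatch to single-image conversion
-- (-i) or batch conversion over an image list (-l).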
local function waifu2x()
   local cmd = torch.CmdLine()
   cmd:text()
   cmd:text("waifu2x")
   cmd:text("Options:")
   cmd:option("-i", "images/miku_small.png", 'path to input image')
   cmd:option("-l", "", 'path to image-list.txt')
   cmd:option("-scale", 2, 'scale factor')
   cmd:option("-o", "(auto)", 'path to output file')
   cmd:option("-depth", 8, 'bit-depth of the output image (8|16)')
   cmd:option("-model_dir", "./models/upconv_7/art", 'path to model directory')
   cmd:option("-m", "noise_scale", 'method (noise|scale|noise_scale)')
   cmd:option("-noise_level", 1, '(1|2|3)')
   cmd:option("-crop_size", 128, 'patch size per process')
   cmd:option("-resume", 0, "skip existing files (0|1)")
   cmd:option("-thread", -1, "number of CPU threads")
   cmd:option("-tta", 0, '8x slower but slightly higher quality (0|1)')
   cmd:option("-upsampling_filter", "Box", 'upsampling filter (for dev)')
   local opt = cmd:parse(arg)
   if opt.thread > 0 then
      torch.setnumthreads(opt.thread)
   end
   if cudnn then
      cudnn.fastest = true
      cudnn.benchmark = false
   end
   if string.len(opt.l) == 0 then
      convert_image(opt)
   else
      convert_frames(opt)
   end
end
waifu2x()