@@ -6,6 +6,7 @@ require 'w2nn'
 local iproc = require 'iproc'
 local reconstruct = require 'reconstruct'
 local image_loader = require 'image_loader'
+local alpha_util = require 'alpha_util'
 
 torch.setdefaulttensortype('torch.FloatTensor')
 
@@ -14,6 +15,7 @@ local function convert_image(opt)
    local new_x = nil
    local t = sys.clock()
    local scale_f, image_f
+
    if opt.tta == 1 then
       scale_f = reconstruct.scale_tta
       image_f = reconstruct.image_tta
@@ -34,13 +36,16 @@ local function convert_image(opt)
          error("Load Error: " .. model_path)
       end
       new_x = image_f(model, x, opt.crop_size)
+      new_x = alpha_util.composite(new_x, alpha)
    elseif opt.m == "scale" then
       local model_path = path.join(opt.model_dir, ("scale%.1fx_model.t7"):format(opt.scale))
       local model = torch.load(model_path, "ascii")
       if not model then
          error("Load Error: " .. model_path)
       end
+      x = alpha_util.make_border(x, alpha, reconstruct.offset_size(model))
       new_x = scale_f(model, opt.scale, x, opt.crop_size)
+      new_x = alpha_util.composite(new_x, alpha, model)
    elseif opt.m == "noise_scale" then
       local noise_model_path = path.join(opt.model_dir, ("noise%d_model.t7"):format(opt.noise_level))
       local noise_model = torch.load(noise_model_path, "ascii")
@@ -53,15 +58,14 @@ local function convert_image(opt)
       if not scale_model then
          error("Load Error: " .. scale_model_path)
       end
+      x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
       x = image_f(noise_model, x, opt.crop_size)
       new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
+      new_x = alpha_util.composite(new_x, alpha, scale_model)
    else
       error("undefined method:" .. opt.method)
    end
-   if opt.white_noise == 1 then
-      new_x = iproc.white_noise(new_x, opt.white_noise_std, {1.0, 0.8, 1.0})
-   end
-   image_loader.save_png(opt.o, new_x, alpha, opt.depth)
+   image_loader.save_png(opt.o, new_x, opt.depth)
    print(opt.o .. ": " .. (sys.clock() - t) .. " sec")
 end
 local function convert_frames(opt)
@@ -128,23 +132,27 @@ local function convert_frames(opt)
       local new_x = nil
       if opt.m == "noise" and opt.noise_level == 1 then
          new_x = image_f(noise1_model, x, opt.crop_size)
+         new_x = alpha_util.composite(new_x, alpha)
       elseif opt.m == "noise" and opt.noise_level == 2 then
          new_x = image_func(noise2_model, x, opt.crop_size)
+         new_x = alpha_util.composite(new_x, alpha)
       elseif opt.m == "scale" then
+         x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
          new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
+         new_x = alpha_util.composite(new_x, alpha, scale_model)
       elseif opt.m == "noise_scale" and opt.noise_level == 1 then
+         x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
          x = image_f(noise1_model, x, opt.crop_size)
          new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
+         new_x = alpha_util.composite(new_x, alpha, scale_model)
       elseif opt.m == "noise_scale" and opt.noise_level == 2 then
+         x = alpha_util.make_border(x, alpha, reconstruct.offset_size(scale_model))
          x = image_f(noise2_model, x, opt.crop_size)
          new_x = scale_f(scale_model, opt.scale, x, opt.crop_size)
+         new_x = alpha_util.composite(new_x, alpha, scale_model)
       else
          error("undefined method:" .. opt.method)
       end
-      if opt.white_noise == 1 then
-         new_x = iproc.white_noise(new_x, opt.white_noise_std, {1.0, 0.8, 1.0})
-      end
-
       local output = nil
       if opt.o == "(auto)" then
          local name = path.basename(lines[i])
@@ -154,7 +162,7 @@ local function convert_frames(opt)
       else
          output = string.format(opt.o, i)
       end
-      image_loader.save_png(output, new_x, alpha, opt.depth)
+      image_loader.save_png(output, new_x, opt.depth)
       xlua.progress(i, #lines)
       if i % 10 == 0 then
          collectgarbage()
@@ -182,8 +190,6 @@ local function waifu2x()
    cmd:option("-resume", 0, "skip existing files (0|1)")
    cmd:option("-thread", -1, "number of CPU threads")
    cmd:option("-tta", 0, '8x slower and slightly high quality (0|1)')
-   cmd:option("-white_noise", 0, 'adding white noise to output image (0|1)')
-   cmd:option("-white_noise_std", 0.0055, 'standard division of white noise')
 
    local opt = cmd:parse(arg)
    if opt.thread > 0 then