
multi GPU server

nagadomi 10 years ago
commit 0a683fcb34
2 changed files with 21 additions and 6 deletions
  1. appendix/run-web.sh  +8 -1
  2. web.lua  +13 -5

+ 8 - 1
appendix/run-web.sh

@@ -1,4 +1,11 @@
 #!/bin/zsh
 # waifu2x daemon script
+gpu=1
+port=8812
+if [ $# -eq 2 ]; then
+    gpu=$1
+    port=$2
+fi
 source /home/ubuntu/.zshrc
-stdbuf -o 0 th web.lua  >> ./waifu2x.log 2>&1
+echo stdbuf -o 0 th web.lua -gpu $gpu -port $port >> ./waifu2x_${port}.log 2>&1
+stdbuf -o 0 th web.lua -gpu $gpu -port $port >> ./waifu2x_${port}.log 2>&1
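
With run-web.sh now accepting a GPU id and a port, one daemon can be started per GPU, each writing to its own log. A minimal usage sketch; the GPU id 2 and port 8813 below are illustrative, only the defaults come from the script itself:

    # one daemon per GPU, each on its own port and log file
    ./appendix/run-web.sh 1 8812    # GPU 1, listening on 8812
    ./appendix/run-web.sh 2 8813    # GPU 2, listening on 8813 (illustrative values)
    # with no arguments the script falls back to gpu=1, port=8812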

+ 13 - 5
web.lua

@@ -4,13 +4,21 @@ local uuid = require 'uuid'
 local ffi = require 'ffi'
 local md5 = require 'md5'
 require 'pl'
-
-torch.setdefaulttensortype('torch.FloatTensor')
-torch.setnumthreads(4)
-
 require './lib/portable'
 require './lib/LeakyReLU'
 
+local cmd = torch.CmdLine()
+cmd:text()
+cmd:text("waifu2x-api")
+cmd:text("Options:")
+cmd:option("-port", 8812, 'listen port')
+cmd:option("-gpu", 1, 'Device ID')
+cmd:option("-core", 2, 'number of CPU cores')
+local opt = cmd:parse(arg)
+cutorch.setDevice(opt.gpu)
+torch.setdefaulttensortype('torch.FloatTensor')
+torch.setnumthreads(opt.core)
+
 local iproc = require './lib/iproc'
 local reconstruct = require './lib/reconstruct'
 local image_loader = require './lib/image_loader'
@@ -196,5 +204,5 @@ local app = turbo.web.Application:new(
       {"^/api$", APIHandler},
    }
 )
-app:listen(8812, "0.0.0.0", {max_body_size = CURL_MAX_SIZE})
+app:listen(opt.port, "0.0.0.0", {max_body_size = CURL_MAX_SIZE})
 turbo.ioloop.instance():start()
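
web.lua can also be launched directly with the new command-line options; a sketch, with the non-default values chosen purely for illustration:

    # start the API server on a specific device and port
    th web.lua -gpu 2 -port 8813 -core 4
    # defaults: -gpu 1, -port 8812, -core 2 (CPU threads, passed to torch.setnumthreads)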