├── tunnel ├── init.lua ├── share.lua ├── plural.lua ├── printer.lua ├── block.lua ├── counter.lua ├── atomic.lua ├── serializer.lua ├── hash.lua └── vector.lua ├── rockspec └── tunnel-scm-1.rockspec ├── unittest ├── share.lua ├── printer.lua ├── plural.lua ├── atomic.lua ├── block.lua ├── serializer.lua ├── counter.lua ├── hash.lua └── vector.lua ├── LICENSE └── README.md /tunnel/init.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Tunnel initialization 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | tunnel = tunnel or {} 7 | 8 | require('tunnel.atomic') 9 | require('tunnel.block') 10 | require('tunnel.counter') 11 | require('tunnel.hash') 12 | require('tunnel.plural') 13 | require('tunnel.printer') 14 | require('tunnel.serializer') 15 | require('tunnel.share') 16 | require('tunnel.vector') 17 | 18 | return tunnel 19 | -------------------------------------------------------------------------------- /rockspec/tunnel-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = 'tunnel' 2 | version = 'scm-1' 3 | 4 | source = { 5 | url = 'git://github.com/zhangxiangxiao/tunnel.git' 6 | } 7 | 8 | description = { 9 | summary = 'Tunnel', 10 | detailed = 'A data driven framework for distributed computing in Torch 7.', 11 | homepage = 'https://github.com/zhangxiangxiao/tunnel', 12 | license = 'BSD' 13 | } 14 | 15 | dependencies = { 16 | 'lua >= 5.1', 17 | 'torch >= 7.0', 18 | 'threads', 19 | 'tds' 20 | } 21 | 22 | build = { 23 | type = 'builtin', 24 | modules = { 25 | ['tunnel.init'] = "tunnel/init.lua", 26 | ['tunnel.atomic'] = 'tunnel/atomic.lua', 27 | ['tunnel.block'] = 'tunnel/block.lua', 28 | ['tunnel.counter'] = 'tunnel/counter.lua', 29 | ['tunnel.hash'] = 'tunnel/hash.lua', 30 | ['tunnel.plural'] = 'tunnel/plural.lua', 31 | ['tunnel.printer'] = 'tunnel/printer.lua', 32 | ['tunnel.serializer'] = 'tunnel/serializer.lua', 33 | ['tunnel.share'] = 'tunnel/share.lua', 34 | 
['tunnel.vector'] = 'tunnel/vector.lua' 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /tunnel/share.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Multi-threaded shared serialization wrapper 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local torch = require('torch') 7 | 8 | local Serializer = require('tunnel.serializer') 9 | 10 | tunnel = tunnel or {} 11 | 12 | -- Append an underscore to distinguish between metatable and class name 13 | local Share_ = torch.class('tunnel.Share') 14 | 15 | -- Constructor 16 | -- data: the data to be shared. Must be shared serializable. 17 | function Share_:__init(data) 18 | self.data = data 19 | self.serializer = Serializer() 20 | return self 21 | end 22 | 23 | -- Access function 24 | function Share_:access(callback) 25 | return callback(self.data) 26 | end 27 | 28 | -- Serialization of this object 29 | function Share_:__write(f) 30 | local data = self.serializer:save(self.data) 31 | f:writeObject(data) 32 | end 33 | 34 | -- Deserialization of this object 35 | function Share_:__read(f) 36 | if not self.serializer then 37 | self.serializer = Serializer() 38 | end 39 | local data = f:readObject() 40 | self.data = self.serializer:load(data) 41 | end 42 | 43 | -- Return the class, not the metatable 44 | return tunnel.Share 45 | -------------------------------------------------------------------------------- /unittest/share.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for Atomic 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Share = require('tunnel.share') 7 | 8 | local torch = require('torch') 9 | 10 | local Block = require('tunnel.block') 11 | local Printer = require('tunnel.printer') 12 | 13 | -- A Logic Named Joe 14 | local joe = {} 15 | 16 | function joe.main() 17 | if joe.init then 18 | print('Initializing testing environment') 19 | joe:init() 20 | end 21 
| for name, func in pairs(joe) do 22 | if type(name) == 'string' and type(func) == 'function' 23 | and name:match('[%g]+Test') then 24 | print('\nExecuting '..name) 25 | func(joe) 26 | end 27 | end 28 | end 29 | 30 | function joe:init() 31 | self.share_data = Share(torch.DoubleTensor(1)) 32 | self.printer = Printer() 33 | self.printer('main', torch.pointer(self.share_data.data:storage())) 34 | end 35 | 36 | function joe:shareTest() 37 | -- Create a block of 3 threads 38 | local block = Block(3, function () require('torch') end) 39 | block:add(self.printer, self.share_data) 40 | block:run( 41 | function (printer, share_data) 42 | local torch = require('torch') 43 | share_data:access( 44 | function (data) 45 | printer('share_access', torch.pointer(data:storage())) 46 | end) 47 | end) 48 | block:synchronize() 49 | end 50 | 51 | joe.main() 52 | return joe 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016, Xiang Zhang 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of tunnel nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 
17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /tunnel/plural.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Plural wrapper 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | tunnel = tunnel or {} 7 | 8 | local Counter = require('tunnel.counter') 9 | local Hash = require('tunnel.hash') 10 | local Vector = require('tunnel.vector') 11 | 12 | -- Append an underscore to distinguish between metatable and class name 13 | local Plural_ = torch.class('tunnel.Plural') 14 | 15 | -- Constructor 16 | function Plural_:__init(size, callback, ...) 17 | self.data = {} 18 | for i = 1, size do 19 | self.data[i] = callback(...) 
20 | end 21 | 22 | return self 23 | end 24 | 25 | -- Do something for all of the data object 26 | function Plural_:doAll(callback) 27 | local ret = {} 28 | for index, value in ipairs(self.data) do 29 | ret[index] = callback(index, value) 30 | end 31 | return ret 32 | end 33 | 34 | -- Get the object 35 | function Plural_:get(index) 36 | return self.data[index] 37 | end 38 | 39 | -- Set the object 40 | function Plural_:set(index, value) 41 | self.data[index] = value 42 | end 43 | 44 | -- The index operator 45 | function Plural_:__index(index) 46 | if type(index) == 'number' then 47 | return self:get(index) 48 | else 49 | local method = Plural_[index] 50 | if method then 51 | return method 52 | else 53 | error('Invalid index (number) or method name') 54 | end 55 | end 56 | end 57 | 58 | -- The new index operator 59 | function Plural_:__newindex(index, value) 60 | if type(index) == 'number' then 61 | return self:set(index, value) 62 | else 63 | rawset(self, index, value) 64 | end 65 | end 66 | 67 | -- The length operator 68 | function Plural_:__len() 69 | return #self.data 70 | end 71 | 72 | -- Shortcut for counter 73 | function tunnel.Counters(size, ...) 74 | return tunnel.Plural(size, Counter, ...) 75 | end 76 | 77 | -- Shortcut for hash table 78 | function tunnel.Hashes(size, ...) 79 | return tunnel.Plural(size, Hash, ...) 80 | end 81 | 82 | -- Shortcut for vector 83 | function tunnel.Vectors(size, ...) 84 | return tunnel.Plural(size, Vector, ...) 
85 | end 86 | 87 | return tunnel.Plural 88 | -------------------------------------------------------------------------------- /unittest/printer.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for Printer 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Printer = require('tunnel.printer') 7 | 8 | local Block = require('tunnel.block') 9 | 10 | -- A Logic Named Joe 11 | local joe = {} 12 | 13 | function joe.main() 14 | if joe.init then 15 | print('Initializing testing environment') 16 | joe:init() 17 | end 18 | for name, func in pairs(joe) do 19 | if type(name) == 'string' and type(func) == 'function' 20 | and name:match('[%g]+Test') then 21 | print('\nExecuting '..name) 22 | func(joe) 23 | end 24 | end 25 | end 26 | 27 | function joe:init() 28 | print('Creating a block of 5 threads') 29 | self.block = Block(5) 30 | print('Creating the printer') 31 | self.printer = Printer() 32 | print('Connecting the block with the printer') 33 | self.block:add(self.printer) 34 | end 35 | 36 | function joe:printerTest() 37 | local block = self.block 38 | local printer_job = function(printer) 39 | local ffi = require('ffi') 40 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 41 | for i = 1, 4 do 42 | printer('print_sync', __threadid, i) 43 | printer:printAsync('print_async', __threadid, i) 44 | ffi.C.sleep(1) 45 | end 46 | for i = 1, 4 do 47 | printer:write( 48 | 'write_sync', '\t', tostring(__threadid), '\t', tostring(i), '\n') 49 | printer:writeAsync( 50 | 'write_async', '\t', tostring(__threadid), '\t', tostring(i), '\n') 51 | ffi.C.sleep(1) 52 | end 53 | for i = 1, 4 do 54 | printer:access( 55 | function () 56 | print('access_sync', __threadid, i) 57 | ffi.C.sleep(1) 58 | end) 59 | local status = printer:accessAsync( 60 | function () 61 | print('access_async', __threadid, i) 62 | ffi.C.sleep(1) 63 | return true 64 | end) 65 | printer('access_async', __threadid, i, 'blocked') 66 | end 67 | end 68 | 
block:run(printer_job) 69 | end 70 | 71 | joe.main() 72 | return joe 73 | -------------------------------------------------------------------------------- /tunnel/printer.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Multi-threaded exclusive printer 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | tunnel = tunnel or {} 7 | 8 | local Atomic = require('tunnel.atomic') 9 | 10 | -- Append an underscore to distinguish between metatable and class name 11 | local Printer_ = torch.class('tunnel.Printer') 12 | 13 | -- Constructor 14 | function Printer_:__init() 15 | self.atomic = Atomic() 16 | end 17 | 18 | -- Print information 19 | function Printer_:print(...) 20 | local arg = {...} 21 | return self.atomic:write( 22 | function () 23 | print(unpack(arg)) 24 | return true 25 | end) 26 | end 27 | 28 | -- Print information asynchronously 29 | function Printer_:printAsync(...) 30 | local arg = {...} 31 | return self.atomic:writeAsync( 32 | function () 33 | print(unpack(arg)) 34 | return true 35 | end) 36 | end 37 | 38 | -- Write information using io.write 39 | function Printer_:write(...) 40 | local io = require('io') 41 | local arg = {...} 42 | return self.atomic:write( 43 | function () 44 | io.write(unpack(arg)) 45 | return true 46 | end) 47 | end 48 | 49 | -- Write information using io.write 50 | function Printer_:writeAsync(...) 51 | local io = require('io') 52 | local arg = {...} 53 | return self.atomic:writeAsync( 54 | function () 55 | io.write(unpack(arg)) 56 | return true 57 | end) 58 | end 59 | 60 | -- Access the printing 61 | function Printer_:access(callback) 62 | return self.atomic:write(function () return callback() end) 63 | end 64 | 65 | -- Access the printing 66 | function Printer_:accessAsync(callback) 67 | return self.atomic:writeAsync(function () return callback() end) 68 | end 69 | 70 | -- Call operator corresponds to print 71 | function Printer_:__call(...) 72 | self:print(...) 
73 | end 74 | 75 | -- Serialization of this object 76 | function Printer_:__write(f) 77 | f:writeObject(self.atomic) 78 | end 79 | 80 | -- Deserialization of this object 81 | function Printer_:__read(f) 82 | self.atomic = f:readObject() 83 | end 84 | 85 | -- Return the class, not the metatable 86 | return tunnel.Printer 87 | -------------------------------------------------------------------------------- /tunnel/block.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Thread manager 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local threads = require('threads') 7 | local torch = require('torch') 8 | 9 | local Vector = require('tunnel.vector') 10 | 11 | tunnel = tunnel or {} 12 | 13 | -- Append an underscore to distinguish between metatable and class name 14 | local Block_ = torch.class('tunnel.Block') 15 | 16 | -- Constructor 17 | -- size: the number of threads in the block 18 | -- callback: a function used to initialize the threads. 19 | function Block_:__init(size, callback) 20 | self.size = size or 1 21 | 22 | local init_job = self:initJob(callback) 23 | self.block = threads.Threads(self.size, init_job) 24 | self.block:specific(true) 25 | 26 | self.data = {} 27 | self.result = {} 28 | self.status = {} 29 | self.count = 0 30 | end 31 | 32 | function Block_:add(...) 
33 | local data_objects = {...} 34 | for _, data in ipairs(data_objects) do 35 | self.data[#self.data + 1] = data 36 | end 37 | return self 38 | end 39 | 40 | function Block_:run(callback) 41 | self.count = self.count + 1 42 | local result, status = Vector(), Vector() 43 | for i = 1, self.size do 44 | result[i] = nil 45 | status[i] = nil 46 | end 47 | self.result[self.count] = result 48 | self.status[self.count] = status 49 | for i = 1, self.size do 50 | local job = self:runJob(i, callback) 51 | self.block:addjob(i, job) 52 | end 53 | return self.count 54 | end 55 | 56 | function Block_:getResult(count) 57 | local count = count or self.count 58 | return self.result[count], self.status[count] 59 | end 60 | 61 | function Block_:synchronize() 62 | self.block:synchronize() 63 | return self:getResult() 64 | end 65 | 66 | function Block_:terminate() 67 | self.block:terminate() 68 | return self:getResult() 69 | end 70 | 71 | function Block_:initJob(callback) 72 | return function () 73 | require('tunnel') 74 | if callback ~= nil then 75 | callback() 76 | end 77 | end 78 | end 79 | 80 | function Block_:runJob(index, callback) 81 | local data = self.data 82 | local result = self.result[self.count] 83 | local status = self.status[self.count] 84 | return function () 85 | -- Execute the callback 86 | local pack = function (status, ...) 
87 | return status, {...} 88 | end 89 | status[index], result[index] = pack(pcall(callback, unpack(data))) 90 | end 91 | end 92 | 93 | function Block_:__len() 94 | return self.size 95 | end 96 | 97 | return tunnel.Block 98 | -------------------------------------------------------------------------------- /unittest/plural.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for Plural 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Plural = require('tunnel.plural') 7 | 8 | local Block = require('tunnel.block') 9 | local Counter = require('tunnel.counter') 10 | local Printer = require('tunnel.printer') 11 | local Vector = require('tunnel.vector') 12 | 13 | -- A Logic Named Joe 14 | local joe = {} 15 | 16 | function joe.main() 17 | if joe.init then 18 | print('Initializing testing environment') 19 | joe:init() 20 | end 21 | for name, func in pairs(joe) do 22 | if type(name) == 'string' and type(func) == 'function' 23 | and name:match('[%g]+Test') then 24 | print('\nExecuting '..name) 25 | func(joe) 26 | end 27 | end 28 | end 29 | 30 | function joe:init() 31 | local printer = Printer() 32 | local vectors = Plural(3, Vector, 3) 33 | printer('Initialized vectors', #vectors) 34 | self.printer = printer 35 | self.vectors = vectors 36 | end 37 | 38 | function joe:pluralTest() 39 | local ret = self.vectors:doAll( 40 | function (index, vector) 41 | vector:pushBack('do_all '..index) 42 | return true 43 | end) 44 | for i, v in ipairs(ret) do 45 | self.printer('ret', i, v) 46 | end 47 | 48 | local popback_counter = Counter() 49 | local popback_block = Block(#self.vectors) 50 | popback_block:add(popback_counter, self.printer, self.vectors) 51 | popback_block:run(self:popBackJob()) 52 | 53 | local pushback_counter = Counter() 54 | local pushback_block = Block(#self.vectors) 55 | pushback_block:add(pushback_counter, self.printer, self.vectors) 56 | pushback_block:run(self:pushBackJob()) 57 | 58 | local status, result = 
popback_block:synchronize() 59 | for i, v in ipairs(status) do 60 | if v == false then 61 | self.printer('Error in popback_block', i, unpack(result[i])) 62 | end 63 | end 64 | local status, result = pushback_block:synchronize() 65 | for i, v in ipairs(status) do 66 | if v == false then 67 | self.printer('Error in pushback_block', i, unpack(result[i])) 68 | end 69 | end 70 | end 71 | 72 | function joe:popBackJob() 73 | return function (counter, printer, vectors) 74 | local id = counter:increase() 75 | printer('popback', __threadid, 'id', id) 76 | for i = 1, 60 do 77 | local value = vectors[id]:popBack(value) 78 | printer('popback', __threadid, id, i, value) 79 | end 80 | end 81 | end 82 | 83 | function joe:pushBackJob() 84 | return function (counter, printer, vectors) 85 | local math = require('math') 86 | local os = require('os') 87 | local id = counter:increase() 88 | printer('pushback', __threadid, 'id', id) 89 | math.randomseed(10000 + id * 1000 + os.time()) 90 | for i = 1, 60 do 91 | local value = math.random(100) 92 | vectors[id]:pushBack(value) 93 | printer('pushback', __threadid, id, i, value) 94 | end 95 | end 96 | end 97 | 98 | joe.main() 99 | return joe 100 | -------------------------------------------------------------------------------- /tunnel/counter.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Multi-threaded atomic counter 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local torch = require('torch') 7 | 8 | local Atomic = require('tunnel.atomic') 9 | 10 | tunnel = tunnel or {} 11 | 12 | -- Append an understore to distinguish between metatable and class name 13 | local Counter_ = torch.class('tunnel.Counter') 14 | 15 | -- Constructor 16 | -- value: the initial value of the counter. Default is 0. 17 | function Counter_:__init(value) 18 | self.count = Atomic(torch.LongTensor(1):fill(value or 0)) 19 | return self 20 | end 21 | 22 | -- Increase the counter 23 | -- step: the step to increase. 
Default is 1. 24 | function Counter_:increase(step) 25 | return self.count:write( 26 | function (count) 27 | count:add(step or 1) 28 | return count[1] 29 | end) 30 | end 31 | 32 | -- Increase the counter asynchronously 33 | -- step: the step to increase. Default is 1. 34 | function Counter_:increaseAsync(step) 35 | local step = step or 1 36 | return self.count:writeAsync( 37 | function (count) 38 | count:add(step) 39 | return count[1] 40 | end) 41 | end 42 | 43 | -- Decrease the counter 44 | -- step: the step to decrease. Default is 1. 45 | function Counter_:decrease(step) 46 | local step = step or 1 47 | return self.count:write( 48 | function (count) 49 | count:add(-step) -- fix: decrease must subtract the step, not add it 50 | return count[1] 51 | end) 52 | end 53 | 54 | -- Decrease the counter asynchronously 55 | -- step: the step to decrease. Default is 1. 56 | function Counter_:decreaseAsync(step) 57 | return self.count:writeAsync( 58 | function (count) 59 | count:add(-(step or 1)) -- fix: '-step or -1' raised an error when step was nil 60 | return count[1] 61 | end) 62 | end 63 | 64 | -- Get the value of the counter 65 | function Counter_:get() 66 | return self.count:read( 67 | function (count) 68 | return count[1] 69 | end) 70 | end 71 | 72 | -- Get the value of the counter asynchronously 73 | function Counter_:getAsync() 74 | return self.count:readAsync( 75 | function (count) 76 | return count[1] 77 | end) 78 | end 79 | 80 | -- Set the value of the counter, returning the old value 81 | function Counter_:set(value) 82 | return self.count:write( 83 | function (count) 84 | local old_value = count[1] 85 | count[1] = value 86 | return old_value 87 | end) 88 | end 89 | 90 | -- Set the value of the counter asynchronously, returning the old value 91 | function Counter_:setAsync(value) 92 | return self.count:writeAsync( 93 | function (count) 94 | local old_value = count[1] 95 | count[1] = value 96 | return old_value 97 | end) 98 | end 99 | 100 | -- Read the value of the counter under the read lock 101 | -- callback: receives the current value; its return value is passed through 102 | function Counter_:read(callback) 103 | return self.count:read( 104 | function (count) 105 | return callback(count[1]) 106 | end) 107 | end 
107 | 108 | -- Read the value of the counter asynchronously 109 | function Counter_:readAsync(callback) 110 | return self.count:readAsync( 111 | function (count) 112 | return callback(count[1]) 113 | end) 114 | end 115 | 116 | -- Write the value of the counter 117 | function Counter_:write(callback) 118 | return self.count:write( 119 | function (count) 120 | count[1] = callback(count[1]) 121 | return count[1] 122 | end) 123 | end 124 | 125 | -- Write the value of the counter asynchronously 126 | function Counter_:writeAsync(callback) 127 | return self.count:writeAsync( 128 | function (count) 129 | count[1] = callback(count[1]) 130 | return count[1] 131 | end) 132 | end 133 | 134 | -- Serialization write 135 | function Counter_:__write(f) 136 | f:writeObject(self.count) 137 | end 138 | 139 | -- Serialization read 140 | function Counter_:__read(f) 141 | self.count = f:readObject() 142 | end 143 | 144 | return tunnel.Counter 145 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tunnel 2 | 3 | Tunnel is a data driven framework for distributed computing in Torch 7. 4 | 5 | Currently tunnel is still a prototype. See [issues](https://github.com/zhangxiangxiao/tunnel/issues) for any bugs that may make it unfit for your purposes. 6 | 7 | ## Why data driven? 8 | 9 | Machine learning systems are intrinsically data-driven. There is data everywhere -- including training and testing samples, the model parameters, and various state used in different algorithms. This means that by carefully designing and implementing various synchronized data structures, we can make it possible to program distributed machine learning systems without a single line of synchronization code. It is in contrast with program-driven methodology where the programmer has to take care of mutexes, condition variables, semaphores and message passing himself. 
10 | 11 | ## Documentation 12 | 13 | See the [doc directory](https://github.com/zhangxiangxiao/tunnel/tree/master/doc). 14 | 15 | ## Installation 16 | 17 | You can install tunnel using luarocks 18 | ```bash 19 | $ git clone git@github.com:zhangxiangxiao/tunnel.git 20 | $ cd tunnel 21 | $ luarocks make rockspec/tunnel-scm-1.rockspec 22 | ``` 23 | 24 | Tunnel requires the following prerequisites. 25 | * [Torch 7](https://github.com/torch/torch7) 26 | * [threads](https://github.com/torch/threads) 27 | * [tds](https://github.com/torch/tds) 28 | 29 | Luarocks should be able to install them automatically. 30 | 31 | ## Example: Consumer-Producer Problem 32 | 33 | Here is an example that demonstrates how to write a [producer-consumer problem](https://en.wikipedia.org/wiki/Producer%E2%80%93consumer_problem) solver without a single line of synchronization code using tunnel. 34 | ```lua 35 | local tunnel = require('tunnel') 36 | 37 | -- This function produces 6 items and put it in vector. 38 | local function produce(vector, printer) 39 | for i = 1, 6 do 40 | local product = __threadid * 1000 + i 41 | vector:pushBack(product) 42 | printer('produce', __threadid, i, product) 43 | end 44 | end 45 | 46 | -- This function takes 4 items from vector and takes 1 second to consume each. 47 | local function consume(vector, printer) 48 | local ffi = require('ffi') 49 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 50 | for i = 1, 4 do 51 | local product = vector:popFront() 52 | printer('consume', __threadid, i, product) 53 | -- Pretend it takes 1 second to consume a product 54 | ffi.C.sleep(1) 55 | end 56 | end 57 | 58 | local function main() 59 | -- Create a syncronized vector with size hint 4 60 | local vector = tunnel.Vector(4) 61 | -- Create an atomically guarded printer 62 | local printer = tunnel.Printer() 63 | 64 | -- Create a 2 producer threads and 3 consumer threads 65 | -- In total they produce 12 products, and consume 12 products. 
66 | local producer_block = tunnel.Block(2) 67 | local consumer_block = tunnel.Block(3) 68 | 69 | -- Link vector and printer with both producer and consumer blocks 70 | producer_block:add(vector, printer) 71 | consumer_block:add(vector, printer) 72 | 73 | -- Execute the producer and consumer threads 74 | producer_block:run(produce) 75 | consumer_block:run(consume) 76 | end 77 | 78 | -- Call the main function 79 | return main() 80 | ``` 81 | 82 | One possible output: 83 | ``` 84 | $ th producer_consumer.lua 85 | produce 1 1 1001 86 | produce 1 2 1002 87 | produce 1 3 1003 88 | produce 1 4 1004 89 | consume 1 1 1001 90 | produce 1 5 1005 91 | consume 2 1 1002 92 | produce 2 1 2001 93 | consume 3 1 1003 94 | produce 1 6 1006 95 | consume 1 2 1004 96 | produce 2 2 2002 97 | consume 2 2 1005 98 | produce 2 3 2003 99 | consume 3 2 2001 100 | produce 2 4 2004 101 | consume 1 3 1006 102 | produce 2 5 2005 103 | produce 2 6 2006 104 | consume 2 3 2002 105 | consume 3 3 2003 106 | consume 1 4 2004 107 | consume 2 4 2005 108 | consume 3 4 2006 109 | ``` 110 | 111 | ### How Does the Example Work? 112 | 113 | First of all, the `main` function created two synchronized data structures -- one is `vector` of type `tunnel.Vector` and the other `printer` of type `tunnel.Printer`. It then created two thread blocks representing 2 producer threads and 3 consumer threads, and add the data structures `vector` and `printer` to the blocks. For these thread blocks, when we call `block:run(callback)`, the `callback` function will obtain these data structures in order and be able to use them. The producer block runs the `produce` function, and the consumer block runs the `consume` function. 114 | 115 | When a thread in the producer block runs the `produce` function, it obtained the `vector` and `printer` data structures. After it produced a product, it will attempt to put it in `vector` by calling `pushBack`. 
However, when the size of `vector` would become larger than 4 (a size hint we used to initialize `vector` in `main` function), it will wait until a consumer has removed a product. Then, it calls `printer(..)` to print information synchronously. It can do so because the type `tunnel.Printer` wraps the print function in an exclusive mutex, such that it can be sure that only one thread (even in different blocks) is accessing the print functionality at a time. 116 | 117 | The consumer thread simply pops from `vector` by calling `popFront()` and prints information in the same way using the same `printer` guard. It then pretends that it takes 1 second to consume the product before going to the next iteration. The call `vector:popFront()` is also synchronized, in the sense that when `vector` is empty it will wait until a product is available then return. 118 | 119 | The example simply demonstrates the usefulness of synchronized data structures and data-driven programming. 120 | 121 | ## Future Plans 122 | 123 | * Implement more synchronized data structures 124 | * Use [Redis](http://redis.io/) for across-machine (cluster) distributed computing 125 | -------------------------------------------------------------------------------- /unittest/atomic.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for Atomic 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Atomic = require('tunnel.atomic') 7 | 8 | local threads = require('threads') 9 | local torch = require('torch') 10 | 11 | -- A Logic Named Joe 12 | local joe = {} 13 | 14 | function joe.main() 15 | if joe.init then 16 | print('Initializing testing environment') 17 | joe:init() 18 | end 19 | for name, func in pairs(joe) do 20 | if type(name) == 'string' and type(func) == 'function' 21 | and name:match('[%g]+Test') then 22 | print('\nExecuting '..name) 23 | func(joe) 24 | end 25 | end 26 | end 27 | 28 | function joe:init() 29 | local data = torch.DoubleTensor(1) 30 | local 
atomic_data = Atomic(data)
   self.atomic_data = atomic_data
   self.print_mutex = threads.Mutex()
end

-- Stress tunnel.Atomic with every access mode at once: synchronous and
-- asynchronous readers and writers all hammering one shared protected value.
function joe:readwriteTest()
   -- Thread groups, declared in the same order the original blocks start.
   local groups = {
      {size = 3, factory = 'syncReaderJob'},   -- 3 synchronous readers
      {size = 2, factory = 'asyncReaderJob'},  -- 2 asynchronous readers
      {size = 2, factory = 'syncWriterJob'},   -- 2 synchronous writers
      {size = 3, factory = 'asyncWriterJob'},  -- 3 asynchronous writers
   }

   local blocks = {}
   for index, group in ipairs(groups) do
      local block = threads.Threads(group.size, self:threadInit())
      block:specific(true)
      for thread_id = 1, group.size do
         block:addjob(thread_id, self[group.factory](self))
      end
      blocks[index] = block
   end

   -- Wait for every group to drain, in creation order.
   for _, block in ipairs(blocks) do
      block:terminate()
   end
end

-- Per-thread initialization job: preload torch and the Atomic class so that
-- worker threads can deserialize the shared upvalues.
function joe:threadInit()
   return function()
      local torch = require('torch')
      local Atomic = require('tunnel.atomic')
   end
end

-- Factory for a synchronous writer thread: 10 blocking writes, 5s apart.
function joe:syncWriterJob()
   local print_mutex_id = self.print_mutex:id()
   local atomic_data = self.atomic_data
   return function()
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local threads = require('threads')
      local print_mutex = threads.Mutex(print_mutex_id)
      for iteration = 1, 10 do
         local new_value = __threadid * 1000 + iteration
         local writer = function (data)
            print_mutex:lock()
            print('sync_writer', __threadid, iteration, data[1], new_value)
            data[1] = new_value
            print_mutex:unlock()
            return true
         end
         if not atomic_data:write(writer) then
            print_mutex:lock()
            print('sync_writer', __threadid, iteration, 'writer blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(5)
      end
   end
end

-- Factory for a synchronous reader thread: 10 blocking reads, 5s apart.
function joe:syncReaderJob()
   local print_mutex_id = self.print_mutex:id()
   local atomic_data = self.atomic_data
   return function()
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local threads = require('threads')
      local print_mutex = threads.Mutex(print_mutex_id)
      for iteration = 1, 10 do
         local reader = function (data)
            print_mutex:lock()
            print('sync_reader', __threadid, iteration, data[1])
            print_mutex:unlock()
            return true
         end
         if not atomic_data:read(reader) then
            print_mutex:lock()
            print('sync_reader', __threadid, iteration, 'reader blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(5)
      end
   end
end

-- Factory for an asynchronous writer thread: 40 non-blocking write attempts;
-- a successful write sleeps 1s inside the callback, a refusal backs off 2s.
function joe:asyncWriterJob()
   local print_mutex_id = self.print_mutex:id()
   local atomic_data = self.atomic_data
   return function()
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local threads = require('threads')
      local print_mutex = threads.Mutex(print_mutex_id)
      for iteration = 1, 40 do
         local new_value = __threadid * 1000 + iteration
         local writer = function (data)
            print_mutex:lock()
            print('async_writer', __threadid, iteration, data[1], new_value)
            data[1] = new_value
            print_mutex:unlock()
            ffi.C.sleep(1)
            return true
         end
         if not atomic_data:writeAsync(writer) then
            print_mutex:lock()
            print('async_writer', __threadid, iteration, 'writer blocked')
            print_mutex:unlock()
            ffi.C.sleep(2)
         end
      end
   end
end

-- Factory for an asynchronous reader thread: 50 non-blocking read attempts;
-- both the success and refusal paths pace themselves with a 1s sleep.
function joe:asyncReaderJob()
   local print_mutex_id = self.print_mutex:id()
   local atomic_data = self.atomic_data
   return function()
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local threads = require('threads')
      local print_mutex = threads.Mutex(print_mutex_id)
      for iteration = 1, 50 do
         local reader = function (data)
            print_mutex:lock()
            print('async_reader', __threadid, iteration, data[1])
            print_mutex:unlock()
            ffi.C.sleep(1)
            return true
         end
         if not atomic_data:readAsync(reader) then
            print_mutex:lock()
            print('async_reader', __threadid, iteration, 'reader blocked')
            print_mutex:unlock()
            ffi.C.sleep(1)
         end
      end
   end
end

joe.main()
return joe
-------------------------------------------------------------------------------- /tunnel/atomic.lua: --------------------------------------------------------------------------------
--[[
Multi-threaded reader-writer wrapper
Copyright 2016 Xiang Zhang
--]]

local threads = require('threads')
local torch = require('torch')

local Serializer = require('tunnel.serializer')

tunnel = tunnel or {}

-- Append an underscore to distinguish between metatable and class name
local Atomic_ = torch.class('tunnel.Atomic')

-- Constructor
-- data: the data to be protected. Must be shared serializable.
function Atomic_:__init(data)
   self.data = data
   self.serializer = Serializer()

   -- count[1] = number of writers holding or waiting on the data,
   -- count[2] = number of readers currently inside a read.
   self.count = torch.LongTensor(2):fill(0)
   self.mutex = threads.Mutex()
   self.wrote_condition = threads.Condition()
   self.read_condition = threads.Condition()

   -- Lua 5.1 / LuaJIT garbage collection: emulate __gc through a newproxy
   -- userdata, since plain tables have no __gc before Lua 5.2.
   if newproxy then
      self.proxy = newproxy(true)
      getmetatable(self.proxy).__gc = function () self:__gc() end
   end

   return self
end

-- Synchronous exclusive writer
-- callback: a callback that writes in data. Blocks until exclusive access
-- is obtained. Returns whatever the callback returns; re-raises its errors.
function Atomic_:write(callback)
   self.mutex:lock()
   -- Waiting for other writers
   while self.count[1] > 0 do
      self.wrote_condition:wait(self.mutex)
   end
   self.wrote_condition:signal()
   self.count[1] = self.count[1] + 1

   -- Waiting for other readers
   while self.count[2] > 0 do
      self.read_condition:wait(self.mutex)
   end
   self.read_condition:signal()
   self.mutex:unlock()

   -- Execute the write
   local status, result = self:pack(pcall(callback, self.data))

   -- Release the write
   self.mutex:lock()
   self.count[1] = self.count[1] - 1
   self.mutex:unlock()
   self.wrote_condition:signal()

   if status == false then
      error(unpack(result))
   end

   return unpack(result)
end

-- Synchronous non-exclusive reader (typo "Synchrounous" fixed)
-- callback: a callback that reads from data. Blocks while writers are
-- registered. Returns whatever the callback returns; re-raises its errors.
function Atomic_:read(callback)
   self.mutex:lock()
   -- Waiting for other writers
   while self.count[1] > 0 do
      self.wrote_condition:wait(self.mutex)
   end
   self.wrote_condition:signal()
   self.count[2] = self.count[2] + 1
   self.mutex:unlock()

   -- Execute the read
   local status, result = self:pack(pcall(callback, self.data))

   -- Release the read
   self.mutex:lock()
   self.count[2] = self.count[2] - 1
   self.mutex:unlock()
   self.read_condition:signal()

   if status == false then
      error(unpack(result))
   end

   return unpack(result)
end

-- Asynchronous exclusive writer
-- callback: a callback that writes in data
-- Returns immediately with no values if it cannot write because there are
-- other readers or writers.
function Atomic_:writeAsync(callback)
   -- Decide whether to write: cheap unlocked peek first, then a locked
   -- re-check of the shared counters (double-checked pattern).
   local decision = false
   if self.count[1] == 0 and self.count[2] == 0 then
      self.mutex:lock()
      if self.count[1] == 0 and self.count[2] == 0 then
         self.count[1] = self.count[1] + 1
         decision = true
      end
      self.mutex:unlock()
   end

   if decision == true then
      -- Execute write
      local status, result = self:pack(pcall(callback, self.data))

      -- Release the write
      self.mutex:lock()
      self.count[1] = self.count[1] - 1
      self.mutex:unlock()
      self.wrote_condition:signal()

      if status == false then
         error(unpack(result))
      end

      return unpack(result)
   end
end

-- Asynchronous non-exclusive reader
-- callback: a callback that reads from data (grammar fixed from "a callback
-- and reads from data")
-- Returns immediately with no values if there are active writers.
function Atomic_:readAsync(callback)
   -- Decide whether to read
   local decision = false
   if self.count[1] == 0 then
      self.mutex:lock()
      if self.count[1] == 0 then
         self.count[2] = self.count[2] + 1
         decision = true
      end
      self.mutex:unlock()
   end

   if decision == true then
      -- Execute the read
      local status, result = self:pack(pcall(callback, self.data))

      -- Release the read
      self.mutex:lock()
      self.count[2] = self.count[2] - 1
      self.mutex:unlock()
      self.read_condition:signal()

      if status == false then
         error(unpack(result))
      end

      return unpack(result)
   end
end

-- Pack returned results into a table: pcall's first result (status) is kept
-- separate, everything else is collected for later unpack.
function Atomic_:pack(status, ...)
   return status, {...}
end

-- Free allocated resources
function Atomic_:free()
   self.mutex:free()
   self.wrote_condition:free()
   self.read_condition:free()
end

-- This works for Lua 5.2, for Lua 5.1 / LuaJIT we depend on self.proxy
function Atomic_:__gc()
   self:free()
end

-- Serialization of this object
function Atomic_:__write(f)
   local data = self.serializer:save(self.data)
   f:writeObject(data)
   local count = self.serializer:save(self.count)
   f:writeObject(count)

   -- Fix: the original built unused threads.Mutex/Condition wrappers from
   -- these ids before writing; only the ids themselves need serializing.
   f:writeObject(self.mutex:id())
   f:writeObject(self.wrote_condition:id())
   f:writeObject(self.read_condition:id())
end

-- Deserialization of this object
function Atomic_:__read(f)
   if not self.serializer then
      self.serializer = Serializer()
   end

   local data = f:readObject()
   self.data = self.serializer:load(data)
   local count = f:readObject()
   self.count = self.serializer:load(count)

   -- NOTE(review): the immediate :free() after wrapping each id looks
   -- suspicious -- confirm against the threads library that freeing a
   -- wrapper constructed from an existing id does not destroy the shared
   -- mutex/conditions still used by other threads.
   self.mutex = threads.Mutex(f:readObject())
   self.mutex:free()
   self.wrote_condition = threads.Condition(f:readObject())
   self.wrote_condition:free()
   self.read_condition = threads.Condition(f:readObject())
   self.read_condition:free()

   -- Lua 5.1 / LuaJIT garbage collection
   if newproxy then
      self.proxy = newproxy(true)
      getmetatable(self.proxy).__gc = function () self:__gc() end
   end
end

-- Return the class, not the metatable Atomic_
return tunnel.Atomic
-------------------------------------------------------------------------------- /unittest/block.lua: --------------------------------------------------------------------------------
--[[
Unittest for Block
Copyright 2016 Xiang Zhang
--]]

local Block = require('tunnel.block')

local threads = require('threads')
local torch = require('torch')

local Atomic = require('tunnel.atomic')
local Vector = require('tunnel.vector')

-- A Logic Named Joe
local joe = {}

-- Run every method whose name matches 'Test', bracketed by optional
-- init/free steps.
function joe.main()
   if joe.init then
      print('Initializing testing environment')
      joe:init()
   end
   for name, func in pairs(joe) do
      if type(name) == 'string' and type(func) == 'function'
         and name:match('[%g]+Test') then
         print('\nExecuting '..name)
         func(joe)
      end
   end
   if joe.free then
      print('Freeing testing environment')
      joe:free()
   end
end

-- Happy-path exercise of Block: two sequential jobs across 5 threads, with
-- results collected through synchronize().
function joe:dummyTest()
   local print_mutex = threads.Mutex()
   local print_mutex_id = print_mutex:id()

   print('Creating a block of 5 threads')
   local init_job = function ()
      local threads = require('threads')

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('init_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
   end
   local block = Block(5, init_job)

   print('Executing a first job for the threads')
   local first_job = function()
      local ffi = require('ffi')
      local threads = require('threads')

      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      ffi.C.sleep(1)

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('first_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
      return 'first job return message', __threadid
   end
   local run_id = block:run(first_job)
   print('Got run id: '..run_id)

   print('Synchronize the threads')
   local result, status = block:synchronize()
   print('Getting execution results')
   for index, value in ipairs(result) do
      print(status[index], unpack(value))
   end

   print('Executing a second job for the threads')
   local second_job = function()
      local ffi = require('ffi')
      local threads = require('threads')

      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      ffi.C.sleep(1)

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('second_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
      return 'second job return message', __threadid
   end
   local run_id = block:run(second_job)
   print('Got run id: '..run_id)

   print('Synchronize the threads')
   local result, status = block:synchronize()
   print('Getting execution results')
   for index, value in ipairs(result) do
      print(status[index], unpack(value))
   end

   print('Terminating the threads')
   local result, status = block:terminate()
end

-- Error propagation: the first job raises inside every thread; results for
-- both jobs are later fetched individually through getResult().
function joe:errorTest()
   local print_mutex = threads.Mutex()
   local print_mutex_id = print_mutex:id()

   print('Creating a block of 5 threads')
   local init_job = function ()
      local threads = require('threads')

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('init_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
   end
   local block = Block(5, init_job)

   print('Executing a first job for the threads')
   local first_job = function()
      local ffi = require('ffi')
      local threads = require('threads')

      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      ffi.C.sleep(1)

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('first_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
      error('this is a test error for thread '..tostring(__threadid))
      return 'first job return message', __threadid
   end
   local run_id = block:run(first_job)
   print('Got run id: '..run_id)

   print('Executing a second job for the threads')
   local second_job = function()
      local ffi = require('ffi')
      local threads = require('threads')

      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      ffi.C.sleep(1)

      local print_mutex = threads.Mutex(print_mutex_id)
      print_mutex:lock()
      print('second_job', __threadid)
      print_mutex:unlock()
      print_mutex:free()
      return 'second job return message', __threadid
   end
   local run_id = block:run(second_job)
   print('Got run id: '..run_id)

   print('Synchronize the threads')
   block:synchronize()
   print('Getting execution results for first job')
   local result, status = block:getResult(1)
   for index, value in ipairs(result) do
      print(status[index], unpack(value))
   end
   print('Getting execution results for second job')
   local result, status = block:getResult(2)
   for index, value in ipairs(result) do
      print(status[index], unpack(value))
   end

   print('Terminating the threads')
   local result, status = block:terminate()
end

-- Classic producer-consumer across two blocks sharing a bounded Vector
-- queue, with an Atomic used purely as a print guard.
function joe:producerConsumerTest()
   print('Creating a producer block of 2 threads')
   local producers = Block(2)
   print('Creating a consumer block of 3 threads')
   local consumers = Block(3)
   print('Creating a vector as a producer-consumer queue')
   local queue = Vector(4)
   print('Creating an atomic as a print atomic guard')
   local atomic = Atomic()

   -- Fix: message typo 'procuder' -> 'producer'.
   print('Adding the queue and atomic to producer block')
   producers:add(queue, atomic)
   print('Adding the queue and atomic to consumer block')
   consumers:add(queue, atomic)

   local producer_job = function (queue, atomic)
      for i = 1, 6 do
         local value = __threadid * 1000 + i
         local status = queue:pushBack(value)
         atomic:write(function() print('produce', __threadid, i, value) end)
      end
   end

   local consumer_job = function (queue, atomic)
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      for i = 1, 4 do
         local value, status = queue:popFront()
         atomic:write(function() print('consume', __threadid, i, value) end)
         ffi.C.sleep(1)
      end
   end

   -- Fix: message typo 'prodecuer' -> 'producer'.
   print('Starting producer jobs')
   producers:run(producer_job)
   print('Starting consumer jobs')
   consumers:run(consumer_job)

   producers:terminate()
   consumers:terminate()
   print('Producers and consumers terminated')
end

joe.main()
return joe

-------------------------------------------------------------------------------- /unittest/serializer.lua: --------------------------------------------------------------------------------
--[[
Unittest for Serializer
Copyright 2016 Xiang Zhang
--]]

local Serializer = require('tunnel.serializer')

local tds = require('tds')
local threads = require('threads')
local torch = require('torch')

local Block = require('tunnel.block')
local Printer = require('tunnel.printer')

-- A Logic Named Joe
local joe = {}

-- Run every method whose name matches 'Test', after an optional init step.
function joe.main()
   if joe.init then
      print('Initializing testing environment')
      joe:init()
   end
   for name, func in pairs(joe) do
      if type(name) == 'string' and type(func) == 'function'
         and name:match('[%g]+Test') then
         print('\nExecuting '..name)
         func(joe)
      end
   end
end

function joe:init()
   print('Creating a serializer')
   self.serializer = Serializer()
end

-- Round-trip a tds.Hash through the serializer and retain it in 2 threads.
function joe:hashTest()
   local serializer = self.serializer
   local block = Block(2)
   local printer = Printer()

   local data = tds.Hash()
   printer('Created data', torch.pointer(data))
   local serialized_data = serializer:save(data)
   printer('Serialized data',
torch.pointer(serialized_data))
   local deserialized_data = serializer:load(serialized_data)
   printer('Deserialized data', torch.pointer(deserialized_data))

   block:add(serialized_data, printer)
   block:run(self:hashJob())

   local result, status = block:synchronize()
   for i, v in ipairs(status) do
      if status[i] == false then
         printer('Error in thread', i, v, unpack(result[i]))
      end
   end
end

-- Every *Job factory below hands out the same worker closure: rebuild the
-- object from the shared storage via retain-and-load and report pointers.
local function makeRetainJob()
   return function (serialized_data, printer)
      local torch = require('torch')
      local Serializer = require('tunnel.serializer')
      local serializer = Serializer()
      printer('Obtained serialized data', __threadid,
              torch.pointer(serialized_data))
      local data = serializer:retain(serialized_data)
      printer('Obtained deserialized data', __threadid, torch.pointer(data))
   end
end

function joe:hashJob()
   return makeRetainJob()
end

-- Shared driver for the remaining tests: construct the data object, round
-- trip it through the serializer, then let a 2-thread block retain it.
-- make_data is a constructor closure so object-creation order matches the
-- original code exactly.
local function runCase(self, make_data, job)
   local serializer = self.serializer
   local block = Block(2)
   local printer = Printer()

   local data = make_data()
   printer('Created data', torch.pointer(data))
   local serialized_data = serializer:save(data)
   printer('Serialized data', torch.pointer(serialized_data))
   local deserialized_data = serializer:load(serialized_data)
   printer('Deserialized data', torch.pointer(deserialized_data))

   block:add(serialized_data, printer)
   block:run(job)

   local result, status = block:synchronize()
   for i, v in ipairs(status) do
      if status[i] == false then
         printer('Error in thread', i, v, unpack(result[i]))
      end
   end
end

function joe:vecTest()
   runCase(self, function () return tds.Vec() end, self:vecJob())
end

function joe:vecJob()
   return makeRetainJob()
end

function joe:counterTest()
   runCase(self, function () return tds.AtomicCounter() end, self:counterJob())
end

function joe:counterJob()
   return makeRetainJob()
end

function joe:storageTest()
   runCase(self, function () return torch.DoubleStorage(5) end,
           self:storageJob())
end

function joe:storageJob()
   return makeRetainJob()
end

function joe:tensorTest()
   runCase(self, function () return torch.DoubleTensor(5) end,
           self:tensorJob())
end

function joe:tensorJob()
   return makeRetainJob()
end

joe.main()
return joe
-------------------------------------------------------------------------------- /tunnel/serializer.lua: 
--------------------------------------------------------------------------------
--[[
Data serialization for Tunnel
Copyright Xiang Zhang 2016
--]]

local ffi = require('ffi')
-- Loading threads.sharedserialize is required for its side effects even
-- though the returned module table is not referenced below.
local serialize = require('threads.sharedserialize')
local tds = require('tds')
local torch = require('torch')

tunnel = tunnel or {}

local Serializer_ = torch.class('tunnel.Serializer')

-- Current metatable-swap state, shared by every serializer instance.
Serializer_.state = nil
local STATE = {WRITE = 1, READ = 2, RETAIN = 3}

-- Serialize function: write object into a storage while the shared-pointer
-- metatables are swapped in, then restore the previous swap state.
function Serializer_:save(object)
   local state = self.setWrite()
   local f = torch.MemoryFile()
   f:binary()
   f:writeObject(object)
   local storage = f:storage()
   f:close()
   self.setState(state)
   return storage
end

-- Load function: rebuild an object from a storage produced by save().
function Serializer_:load(storage)
   local state = self.setRead()
   local f = torch.MemoryFile(storage)
   f:binary()
   local object = f:readObject()
   f:close()
   self.setState(state)
   return object
end

-- Retain-and-load function (typo "Retrain" fixed): like load(), but uses the
-- retain metatables so the underlying C object's reference count grows.
function Serializer_:retain(storage)
   local state = self.setRetain()
   local f = torch.MemoryFile(storage)
   f:binary()
   local object = f:readObject()
   f:close()
   self.setState(state)
   return object
end

function Serializer_.setWrite()
   return Serializer_.setState(STATE.WRITE)
end

function Serializer_.setRead()
   return Serializer_.setState(STATE.READ)
end

function Serializer_.setRetain()
   return Serializer_.setState(STATE.RETAIN)
end

-- Switch the global metatable swap to `state`, undoing any previous swap
-- first. Returns the previous state so callers can restore it afterwards.
function Serializer_.setState(state)
   local old_state = Serializer_.state
   if Serializer_.state ~= state then
      if Serializer_.state ~= nil then
         Serializer_.swap[Serializer_.state]()
      end
      if state ~= nil then
         Serializer_.swap[state]()
      end
      Serializer_.state = state
   end
   return old_state
end

-- Swap metatables for writing. Each swap function is an involution (calling
-- it twice restores the originals); keep the exchange order unchanged.
function Serializer_.swapWrite()
   for name, metatable in pairs(Serializer_.metatables) do
      local current = torch.getmetatable(name)
      if current then
         current.__factory, metatable.factory =
            metatable.factory, current.__factory
         current.__write, metatable.write =
            metatable.write, current.__write
         current.write, metatable.__write =
            metatable.__write, current.write
      end
   end
end

-- Swap metatables for reading
function Serializer_.swapRead()
   for name, metatable in pairs(Serializer_.metatables) do
      local current = torch.getmetatable(name)
      if current then
         current.__factory, metatable.factory =
            metatable.factory, current.__factory
         current.__read, metatable.read =
            metatable.read, current.__read
         current.read, metatable.__read =
            metatable.__read, current.read
      end
   end
end

-- Swap metatables for retained reading
function Serializer_.swapRetain()
   for name, metatable in pairs(Serializer_.metatables) do
      local current = torch.getmetatable(name)
      if current then
         current.__factory, metatable.factory =
            metatable.factory, current.__factory
         current.__read, metatable.retain =
            metatable.retain, current.__read
         current.read, metatable.__retain =
            metatable.__retain, current.read
      end
   end
end

-- Swap tables based on state
Serializer_.swap = {}
Serializer_.swap[STATE.WRITE] = Serializer_.swapWrite
Serializer_.swap[STATE.READ] = Serializer_.swapRead
Serializer_.swap[STATE.RETAIN] = Serializer_.swapRetain

-- Serialize pointer function
function Serializer_.savePointer(object, f)
   f:writeLong(torch.pointer(object))
end

-- Deserialization pointer function
function Serializer_.loadPointer(f)
   return f:readLong()
end

-- Serializer metatables for different data-types (typo "metatatables" fixed)
Serializer_.metatables = {}

Serializer_.metatables['tds.Hash'] = {}
Serializer_.metatables['tds.Hash']['factory'] = function(f)
   local object = Serializer_.loadPointer(f)
   object = ffi.cast('tds_hash&', object)
   ffi.gc(object, tds.C.tds_hash_free)
   return object
end
Serializer_.metatables['tds.Hash']['write'] = function(object, f)
   Serializer_.savePointer(object, f)
   tds.C.tds_hash_retain(object)
end
Serializer_.metatables['tds.Hash']['read'] = function(object, f)
end
Serializer_.metatables['tds.Hash']['retain'] = function(object, f)
   tds.C.tds_hash_retain(object)
end

Serializer_.metatables['tds.Vec'] = {}
Serializer_.metatables['tds.Vec']['factory'] = function(f)
   local object = Serializer_.loadPointer(f)
   object = ffi.cast('tds_vec&', object)
   ffi.gc(object, tds.C.tds_vec_free)
   return object
end
Serializer_.metatables['tds.Vec']['write'] = function(object, f)
   Serializer_.savePointer(object, f)
   tds.C.tds_vec_retain(object)
end
Serializer_.metatables['tds.Vec']['read'] = function(object, f)
end
Serializer_.metatables['tds.Vec']['retain'] = function(object, f)
   tds.C.tds_vec_retain(object)
end

Serializer_.metatables['tds.AtomicCounter'] = {}
Serializer_.metatables['tds.AtomicCounter']['factory'] = function(f)
   local object = Serializer_.loadPointer(f)
   object = ffi.cast('tds_atomic_counter&', object)
   ffi.gc(object, tds.C.tds_atomic_free)
   return object
end
Serializer_.metatables['tds.AtomicCounter']['write'] = function (object, f)
   Serializer_.savePointer(object, f)
   tds.C.tds_atomic_retain(object)
end
Serializer_.metatables['tds.AtomicCounter']['read'] = function(object, f)
end
Serializer_.metatables['tds.AtomicCounter']['retain'] = function (object, f)
   tds.C.tds_atomic_retain(object)
end

Serializer_.metatables['torch.Allocator'] = {}
Serializer_.metatables['torch.Allocator']['factory'] = function(f)
   local object = Serializer_.loadPointer(f)
   -- Fix: the original referenced an undefined global `name` here (always
   -- nil at call time); the metatable name must be given explicitly.
   object = torch.pushudata(object, 'torch.Allocator')
   return object
end
Serializer_.metatables['torch.Allocator']['write'] = function(object, f)
   Serializer_.savePointer(object, f)
end
Serializer_.metatables['torch.Allocator']['read'] = function(object, f)
end
Serializer_.metatables['torch.Allocator']['retain'] = function(object, f)
end

for _, name in ipairs{
   'torch.ByteTensor',
   'torch.CharTensor',
   'torch.ShortTensor',
   'torch.IntTensor',
   'torch.LongTensor',
   'torch.FloatTensor',
   'torch.DoubleTensor',
   'torch.CudaTensor',
   'torch.ByteStorage',
   'torch.CharStorage',
   'torch.ShortStorage',
   'torch.IntStorage',
   'torch.LongStorage',
   'torch.FloatStorage',
   'torch.DoubleStorage',
   'torch.CudaStorage'} do

   Serializer_.metatables[name] = {}
   Serializer_.metatables[name]['factory'] = function(f)
      local object = Serializer_.loadPointer(f)
      object = torch.pushudata(object, name)
      return object
   end
   Serializer_.metatables[name]['write'] = function(object, f)
      Serializer_.savePointer(object, f)
      object:retain()
   end
   Serializer_.metatables[name]['read'] = function(object, f)
   end
   Serializer_.metatables[name]['retain'] = function(object, f)
      object:retain()
   end
end

return tunnel.Serializer
-------------------------------------------------------------------------------- /unittest/counter.lua: --------------------------------------------------------------------------------
--[[
Unittest for counter
Copyright 2016 Xiang Zhang
--]]

local Counter = require('tunnel.counter')

local Block = require('tunnel.block')
local Printer = require('tunnel.printer')

-- A Logic Named Joe
local joe = {}

-- Run every method whose name matches 'Test', after an optional init step.
function joe.main()
   if joe.init then
      print('Initializing testing environment')
      joe:init()
   end
   for name, func in pairs(joe) do
      if type(name) == 'string' and type(func) == 'function'
         and name:match('[%g]+Test') then
         print('\nExecuting '..name)
         func(joe)
      end
   end
end

function joe:init()
   local counter = Counter()
   local printer = Printer()
   self.counter = counter
   self.printer = printer
   self.printer('Counter initialized', counter:get())
end

-- Spawn 12 two-thread blocks covering every counter operation in both its
-- synchronous and asynchronous variant, in the original creation order.
function joe:counterTest()
   local job_names = {
      'syncIncreaseJob', 'syncDecreaseJob',
      'asyncIncreaseJob', 'asyncDecreaseJob',
      'syncGetJob', 'syncSetJob',
      'asyncGetJob', 'asyncSetJob',
      'syncReadJob', 'syncWriteJob',
      'asyncReadJob', 'asyncWriteJob',
   }
   -- Keep the blocks alive in a local until this function returns, just as
   -- the original local variables did.
   local blocks = {}
   for index, job_name in ipairs(job_names) do
      local block = Block(2)
      block:add(self.printer, self.counter)
      -- Consistency fix: invoke the factory as a method (colon-style call)
      -- like the other unittests do; the factories ignore self either way.
      block:run(self[job_name](self))
      blocks[index] = block
   end
end

-- 60 blocking increases by a random step, 1s apart.
function joe:syncIncreaseJob()
   return function (printer, counter)
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      math.randomseed(os.time() + 10000 + __threadid * 1000)
      for i = 1, 60 do
         local step = math.random(10)
         local value = counter:increase(step)
         printer('sync_increase', __threadid, i, step, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 blocking decreases by a random step, 1s apart.
function joe:syncDecreaseJob()
   return function (printer, counter)
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      math.randomseed(os.time() + 20000 + __threadid * 1000)
      for i = 1, 60 do
         local step = math.random(10)
         local value = counter:decrease(step)
         printer('sync_decrease', __threadid, i, step, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 non-blocking increase attempts by a random step, 1s apart.
function joe:asyncIncreaseJob()
   return function (printer, counter)
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      math.randomseed(os.time() + 30000 + __threadid * 1000)
      for i = 1, 60 do
         local step = math.random(10)
         local value = counter:increaseAsync(step)
         printer('async_increase', __threadid, i, step, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 non-blocking decrease attempts by a random step, 1s apart.
function joe:asyncDecreaseJob()
   return function (printer, counter)
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      math.randomseed(os.time() + 40000 + __threadid * 1000)
      for i = 1, 60 do
         local step = math.random(10)
         local value = counter:decreaseAsync(step)
         printer('async_decrease', __threadid, i, step, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 blocking reads of the counter value, 1s apart.
function joe:syncGetJob()
   return function (printer, counter)
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      for i = 1, 60 do
         local value = counter:get()
         printer('sync_get', __threadid, i, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 blocking assignments of a random value, 1s apart.
function joe:syncSetJob()
   return function (printer, counter)
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      math.randomseed(os.time() + 50000 + __threadid * 1000)
      for i = 1, 60 do
         local value = math.random(10)
         value = counter:set(value)
         printer('sync_set', __threadid, i, value)
         ffi.C.sleep(1)
      end
   end
end

-- 60 non-blocking reads of the counter value, 1s apart.
function joe:asyncGetJob()
   return function (printer, counter)
      local ffi = require('ffi')
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      for i = 1, 60 do
         local value
= counter:getAsync() 196 | printer('async_get', __threadid, i, value) 197 | ffi.C.sleep(1) 198 | end 199 | end 200 | end 201 | 202 | function joe:asyncSetJob() 203 | return function (printer, counter) 204 | local ffi = require('ffi') 205 | local math = require('math') 206 | local os = require('os') 207 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 208 | math.randomseed(os.time() + 60000 + __threadid * 1000) 209 | for i = 1, 60 do 210 | local value = math.random(10) 211 | value = counter:setAsync(value) 212 | printer('async_set', __threadid, i, value) 213 | ffi.C.sleep(1) 214 | end 215 | end 216 | end 217 | 218 | function joe:syncReadJob() 219 | return function (printer, counter) 220 | local ffi = require('ffi') 221 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 222 | for i = 1, 60 do 223 | local value = counter:read(function(value) return value end) 224 | printer('sync_read', __threadid, i, value) 225 | ffi.C.sleep(1) 226 | end 227 | end 228 | end 229 | 230 | function joe:asyncReadJob() 231 | return function (printer, counter) 232 | local ffi = require('ffi') 233 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 234 | for i = 1, 60 do 235 | local value = counter:readAsync(function(value) return value end) 236 | printer('async_read', __threadid, i, value) 237 | ffi.C.sleep(1) 238 | end 239 | end 240 | end 241 | 242 | function joe:syncWriteJob() 243 | return function (printer, counter) 244 | local ffi = require('ffi') 245 | local math = require('math') 246 | local os = require('os') 247 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 248 | math.randomseed(os.time() + 70000 + __threadid * 1000) 249 | for i = 1, 60 do 250 | local value = math.random(10) 251 | value = counter:write(function (old_value) return value end) 252 | printer('sync_write', __threadid, i, value) 253 | ffi.C.sleep(1) 254 | end 255 | end 256 | end 257 | 258 | function joe:asyncWriteJob() 259 | return function (printer, counter) 260 | local ffi = require('ffi') 261 | 
local math = require('math') 262 | local os = require('os') 263 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 264 | math.randomseed(os.time() + 80000 + __threadid * 1000) 265 | for i = 1, 60 do 266 | local value = math.random(10) 267 | value = counter:writeAsync(function (old_value) return value end) 268 | printer('async_write', __threadid, i, value) 269 | ffi.C.sleep(1) 270 | end 271 | end 272 | end 273 | 274 | joe.main() 275 | return joe 276 | -------------------------------------------------------------------------------- /tunnel/hash.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Multi-threaded hash 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local tds = require('tds') 7 | local torch = require('torch') 8 | 9 | local Atomic = require('tunnel.atomic') 10 | local Counter = require('tunnel.counter') 11 | local Serializer = require('tunnel.serializer') 12 | 13 | tunnel = tunnel or {} 14 | 15 | -- Append an underscore to distinguish between metatable and class name 16 | local Hash_ = torch.class('tunnel.Hash') 17 | 18 | -- Constructor 19 | function Hash_:__init() 20 | self.hash = Atomic(tds.Hash()) 21 | self.serializer = Serializer() 22 | self.counter = Counter(1) 23 | 24 | -- Lua 5.1 / LuaJIT garbage collection 25 | if newproxy then 26 | self.proxy = newproxy(true) 27 | getmetatable(self.proxy).__gc = function() self:__gc() end 28 | end 29 | 30 | return self 31 | end 32 | 33 | -- Get an item 34 | function Hash_:get(key) 35 | local storage_string, status = self.hash:read( 36 | function (hash) 37 | return hash[key], true 38 | end) 39 | if storage_string then 40 | local storage = torch.CharStorage():string(storage_string) 41 | return self.serializer:retain(storage), status 42 | else 43 | return nil, status 44 | end 45 | 46 | return self 47 | end 48 | 49 | -- Get an item asynchronously 50 | function Hash_:getAsync(key) 51 | local storage_string, status = self.hash:readAsync( 52 | function (hash) 53 | 
return hash[key], true 54 | end) 55 | if storage_string then 56 | local storage = torch.CharStorage():string(storage_string) 57 | return self.serializer:retain(storage), status 58 | else 59 | return nil, status 60 | end 61 | end 62 | 63 | -- Set an item 64 | function Hash_:set(key, value) 65 | if value ~= nil then 66 | local storage = self.serializer:save(value) 67 | return self.hash:write( 68 | function (hash) 69 | local old_value 70 | if hash[key] ~= nil then 71 | old_value = self.serializer:load( 72 | torch.CharStorage():string(hash[key])) 73 | end 74 | hash[key] = storage:string() 75 | return true, old_value 76 | end) 77 | else 78 | return self.hash:write( 79 | function (hash) 80 | local old_value 81 | if hash[key] ~= nil then 82 | old_value = self.serializer:load( 83 | torch.CharStorage():string(hash[key])) 84 | end 85 | hash[key] = nil 86 | return true, old_value 87 | end) 88 | end 89 | end 90 | 91 | -- Set an item asynchronously 92 | function Hash_:setAsync(key, value) 93 | if value ~= nil then 94 | local storage = self.serializer:save(value) 95 | return self.hash:writeAsync( 96 | function (hash) 97 | local old_value 98 | if hash[key] ~= nil then 99 | old_value = self.serializer:load( 100 | torch.CharStorage():string(hash[key])) 101 | end 102 | hash[key] = storage:string() 103 | return true, old_value 104 | end) 105 | else 106 | return self.hash:writeAsync( 107 | function (hash) 108 | local old_value 109 | if hash[key] ~= nil then 110 | old_value = self.serializer:load( 111 | torch.CharStorage():string(hash[key])) 112 | end 113 | hash[key] = nil 114 | return true, old_value 115 | end) 116 | end 117 | end 118 | 119 | -- Read an item 120 | function Hash_:read(key, callback) 121 | return self.hash:read( 122 | function (hash) 123 | local value 124 | local storage_string = hash[key] 125 | if storage_string then 126 | local storage = torch.CharStorage():string(storage_string) 127 | value = self.serializer:retain(storage) 128 | end 129 | return callback(value) 130 
| end) 131 | end 132 | 133 | -- Read an item asynchronously 134 | function Hash_:readAsync(key, callback) 135 | return self.hash:readAsync( 136 | function (hash) 137 | local value 138 | local storage_string = hash[key] 139 | if storage_string then 140 | local storage = torch.CharStorage():string(storage_string) 141 | value = self.serializer:retain(storage) 142 | end 143 | return callback(value) 144 | end) 145 | end 146 | 147 | -- Write an item 148 | function Hash_:write(key, callback) 149 | return self.hash:write( 150 | function (hash) 151 | local value 152 | local storage_string = hash[key] 153 | if storage_string then 154 | local storage = torch.CharStorage():string(storage_string) 155 | value = self.serializer:load(storage) 156 | end 157 | local new_value = callback(value) 158 | if new_value ~= nil then 159 | local new_storage = self.serializer:save(new_value) 160 | hash[key] = new_storage:string() 161 | else 162 | hash[key] = nil 163 | end 164 | return true, new_value 165 | end) 166 | end 167 | 168 | -- Write an item asynchronously 169 | function Hash_:writeAsync(key, callback) 170 | return self.hash:writeAsync( 171 | function (hash) 172 | local value 173 | local storage_string = hash[key] 174 | if storage_string then 175 | local storage = torch.CharStorage():string(storage_string) 176 | value = self.serializer:load(storage) 177 | end 178 | local new_value = callback(value) 179 | if new_value ~= nil then 180 | local new_storage = self.serializer:save(new_value) 181 | hash[key] = new_storage:string() 182 | else 183 | hash[key] = nil 184 | end 185 | return true, new_value 186 | end) 187 | end 188 | 189 | -- Get the size of the hash table 190 | function Hash_:size() 191 | return self.hash:read( 192 | function (hash) 193 | return #hash 194 | end) 195 | end 196 | 197 | -- Get the size of the hash table asynchronously 198 | function Hash_:sizeAsync() 199 | return self.hash:readAsync( 200 | function (hash) 201 | return #hash 202 | end) 203 | end 204 | 205 | -- Iterate 
through all the items 206 | function Hash_:iterator() 207 | local clone = self.hash:read( 208 | function (hash) 209 | local clone = tds.Hash() 210 | for key, value in pairs(hash) do 211 | clone[key] = 212 | self.serializer:save( 213 | self.serializer:retain( 214 | torch.CharStorage():string(value))):string() 215 | end 216 | return clone 217 | end) 218 | if clone then 219 | local iterator = pairs(clone) 220 | return function () 221 | local key, value = iterator() 222 | if value ~= nil then 223 | local storage = torch.CharStorage():string(value) 224 | return key, self.serializer:load(storage) 225 | end 226 | return key, value 227 | end, true 228 | end 229 | end 230 | 231 | -- Iterate through all the items asynchronously 232 | function Hash_:iteratorAsync() 233 | local clone = self.hash:readAsync( 234 | function (hash) 235 | local clone = tds.Hash() 236 | for key, value in pairs(hash) do 237 | clone[key] = 238 | self.serializer:save( 239 | self.serializer:retain( 240 | torch.CharStorage():string(value))):string() 241 | end 242 | return clone 243 | end) 244 | if clone then 245 | local iterator = pairs(clone) 246 | return function () 247 | local key, value = iterator() 248 | if value ~= nil then 249 | local storage = torch.CharStorage():string(value) 250 | return key, self.serializer:load(storage) 251 | end 252 | return key, value 253 | end, true 254 | end 255 | end 256 | 257 | -- Convert to string 258 | function Hash_:toString() 259 | return self.hash:read( 260 | function (hash) 261 | return tostring(hash) 262 | end) 263 | end 264 | 265 | -- Convert to string asynchronously 266 | function Hash_:toStringAsync() 267 | return self.hash:readAsync( 268 | function (hash) 269 | return tostring(hash) 270 | end) 271 | end 272 | 273 | -- Free the resources allocated by hash 274 | function Hash_:free() 275 | local value = self.counter:decrease() 276 | if value == 0 then 277 | self.hash:write( 278 | function (hash) 279 | for key, value in pairs(hash) do 280 | 
self.serializer:load(torch.CharStorage():string(value)) 281 | end 282 | end) 283 | end 284 | end 285 | 286 | -- The index operator 287 | function Hash_:__index(key) 288 | local method = Hash_[key] 289 | if method then 290 | return method 291 | else 292 | return self:get(key) 293 | end 294 | end 295 | 296 | -- The new index operator 297 | function Hash_:__newindex(key, value) 298 | -- Filter out function members 299 | local method = Hash_[key] 300 | if method then 301 | error('Cannot set when key is method name. Use Hash:set(key, value).') 302 | end 303 | -- Filter out data members 304 | if key == 'hash' or key == 'serializer' or key == 'proxy' 305 | or key == 'counter' then 306 | rawset(self, key, value) 307 | else 308 | self:set(key, value) 309 | end 310 | end 311 | 312 | -- To string 313 | function Hash_:__tostring() 314 | return self:toString() 315 | end 316 | 317 | -- Table iterator operator 318 | function Hash_:__pairs() 319 | return self:iterator() 320 | end 321 | 322 | -- Length of the hash 323 | function Hash_:__len() 324 | return self:size() 325 | end 326 | 327 | function Hash_:__gc() 328 | self:free() 329 | end 330 | 331 | -- Serialization of this object 332 | function Hash_:__write(f) 333 | self.counter:increase(1) 334 | f:writeObject(self.hash) 335 | f:writeObject(self.counter) 336 | end 337 | 338 | -- Deserialization of this object 339 | function Hash_:__read(f) 340 | self.hash = f:readObject() 341 | self.counter = f:readObject() 342 | self.serializer = Serializer() 343 | end 344 | 345 | -- Return the class, not the metatable 346 | return tunnel.Hash 347 | -------------------------------------------------------------------------------- /unittest/hash.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for hash 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Hash = require('tunnel.hash') 7 | 8 | local Block = require('tunnel.block') 9 | local Printer = require('tunnel.printer') 10 | 11 | -- A 
Logic Named Joe 12 | local joe = {} 13 | 14 | function joe.main() 15 | if joe.init then 16 | print('Initializing testing environment') 17 | joe:init() 18 | end 19 | for name, func in pairs(joe) do 20 | if type(name) == 'string' and type(func) == 'function' 21 | and name:match('[%g]+Test') then 22 | print('\nExecuting '..name) 23 | func(joe) 24 | end 25 | end 26 | end 27 | 28 | function joe:init() 29 | local hash = Hash() 30 | local printer = Printer() 31 | self.hash = hash 32 | self.printer = printer 33 | end 34 | 35 | function joe:getSetTest() 36 | -- 2 synchronous get threads 37 | local sync_get_block = Block(2) 38 | sync_get_block:add(self.printer, self.hash) 39 | sync_get_block:run(self:syncGetJob()) 40 | 41 | -- 3 synchronous set threads 42 | local sync_set_block = Block(3) 43 | sync_set_block:add(self.printer, self.hash) 44 | sync_set_block:run(self:syncSetJob()) 45 | 46 | -- 2 asynchronous get threads 47 | local async_get_block = Block(2) 48 | async_get_block:add(self.printer, self.hash) 49 | async_get_block:run(self:asyncGetJob()) 50 | 51 | -- 3 asynchronous set threads 52 | local async_set_block = Block(3) 53 | async_set_block:add(self.printer, self.hash) 54 | async_set_block:run(self:asyncSetJob()) 55 | 56 | -- 1 synchronous iterator threads 57 | local sync_iterator_block = Block(1) 58 | sync_iterator_block:add(self.printer, self.hash) 59 | sync_iterator_block:run(self:syncIteratorJob()) 60 | 61 | -- 1 synchronous iterator threads 62 | local async_iterator_block = Block(1) 63 | async_iterator_block:add(self.printer, self.hash) 64 | async_iterator_block:run(self:asyncIteratorJob()) 65 | 66 | local status, result = sync_get_block:synchronize() 67 | for i, v in ipairs(status) do 68 | if v == false then 69 | self.printer('Error in sync_get_block', i, unpack(result[i])) 70 | end 71 | end 72 | self.printer('Synchronized with sync_get_block') 73 | local status, result = sync_set_block:synchronize() 74 | for i, v in ipairs(status) do 75 | if v == false then 76 | 
self.printer('Error in sync_set_block', i, unpack(result[i])) 77 | end 78 | end 79 | self.printer('Synchronized with sync_set_block') 80 | local status, result = async_get_block:synchronize() 81 | for i, v in ipairs(status) do 82 | if v == false then 83 | self.printer('Error in async_get_block', i, unpack(result[i])) 84 | end 85 | end 86 | self.printer('Synchronized with async_get_block') 87 | local status, result = async_set_block:synchronize() 88 | for i, v in ipairs(status) do 89 | if v == false then 90 | self.printer('Error in async_set_block', i, unpack(result[i])) 91 | end 92 | end 93 | self.printer('Synchronized with async_set_block') 94 | local status, result = sync_iterator_block:synchronize() 95 | for i, v in ipairs(status) do 96 | if v == false then 97 | self.printer('Error in sync_iterator_block', i, unpack(result[i])) 98 | end 99 | end 100 | self.printer('Synchronized with sync_iterator_block') 101 | local status, result = async_iterator_block:synchronize() 102 | for i, v in ipairs(status) do 103 | if v == false then 104 | self.printer('Error in async_iterator_block', i, unpack(result[i])) 105 | end 106 | end 107 | self.printer('Synchronized with async_iterator_block') 108 | end 109 | 110 | function joe:readWriteTest() 111 | -- 2 synchronous read threads 112 | local sync_read_block = Block(2) 113 | sync_read_block:add(self.printer, self.hash) 114 | sync_read_block:run(self:syncReadJob()) 115 | 116 | -- 3 synchronous write threads 117 | local sync_write_block = Block(3) 118 | sync_write_block:add(self.printer, self.hash) 119 | sync_write_block:run(self:syncWriteJob()) 120 | 121 | -- 2 asynchronous read threads 122 | local async_read_block = Block(2) 123 | async_read_block:add(self.printer, self.hash) 124 | async_read_block:run(self:asyncReadJob()) 125 | 126 | -- 3 asynchronous write threads 127 | local async_write_block = Block(3) 128 | async_write_block:add(self.printer, self.hash) 129 | async_write_block:run(self:asyncWriteJob()) 130 | 131 | -- 1 
synchronous iterator threads 132 | local sync_iterator_block = Block(1) 133 | sync_iterator_block:add(self.printer, self.hash) 134 | sync_iterator_block:run(self:syncIteratorJob()) 135 | 136 | -- 1 synchronous iterator threads 137 | local async_iterator_block = Block(1) 138 | async_iterator_block:add(self.printer, self.hash) 139 | async_iterator_block:run(self:asyncIteratorJob()) 140 | 141 | local status, result = sync_read_block:synchronize() 142 | for i, v in ipairs(status) do 143 | if v == false then 144 | self.printer('Error in sync_read_block', i, unpack(result[i])) 145 | end 146 | end 147 | self.printer('Synchronized with sync_read_block') 148 | local status, result = sync_write_block:synchronize() 149 | for i, v in ipairs(status) do 150 | if v == false then 151 | self.printer('Error in sync_write_block', i, unpack(result[i])) 152 | end 153 | end 154 | self.printer('Synchronized with sync_write_block') 155 | local status, result = async_read_block:synchronize() 156 | for i, v in ipairs(status) do 157 | if v == false then 158 | self.printer('Error in async_read_block', i, unpack(result[i])) 159 | end 160 | end 161 | self.printer('Synchronized with async_read_block') 162 | local status, result = async_write_block:synchronize() 163 | for i, v in ipairs(status) do 164 | if v == false then 165 | self.printer('Error in async_write_block', i, unpack(result[i])) 166 | end 167 | end 168 | self.printer('Synchronized with async_write_block') 169 | local status, result = sync_iterator_block:synchronize() 170 | for i, v in ipairs(status) do 171 | if v == false then 172 | self.printer('Error in sync_iterator_block', i, unpack(result[i])) 173 | end 174 | end 175 | self.printer('Synchronized with sync_iterator_block') 176 | local status, result = async_iterator_block:synchronize() 177 | for i, v in ipairs(status) do 178 | if v == false then 179 | self.printer('Error in async_iterator_block', i, unpack(result[i])) 180 | end 181 | end 182 | self.printer('Synchronized with 
async_iterator_block') 183 | end 184 | 185 | function joe:syncGetJob() 186 | return function (printer, hash) 187 | local ffi = require('ffi') 188 | local math = require('math') 189 | local os = require('os') 190 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 191 | math.randomseed(os.time() + 20000 + __threadid * 1000) 192 | for i = 1, 30 do 193 | local key = tostring(math.random(100)) 194 | local value = hash[key] 195 | printer('sync_get', __threadid, i, key, tostring(value)) 196 | ffi.C.sleep(2) 197 | end 198 | printer('sync_get', __threadid, 'exit') 199 | end 200 | end 201 | 202 | function joe:syncSetJob() 203 | return function (printer, hash) 204 | local ffi = require('ffi') 205 | local math = require('math') 206 | local os = require('os') 207 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 208 | math.randomseed(os.time() + 10000 + __threadid * 1000) 209 | for i = 1, 60 do 210 | local key = tostring(math.random(100)) 211 | local value = 10000 + __threadid * 1000 + i 212 | hash[key] = value 213 | printer('sync_set', __threadid, i, key, value) 214 | ffi.C.sleep(1) 215 | end 216 | printer('sync_set', __threadid, 'exit') 217 | end 218 | end 219 | 220 | function joe:asyncGetJob() 221 | return function (printer, hash) 222 | local ffi = require('ffi') 223 | local math = require('math') 224 | local os = require('os') 225 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 226 | math.randomseed(os.time() + 40000 + __threadid *1000) 227 | for i = 1, 60 do 228 | local key = tostring(math.random(100)) 229 | local value, status = hash:getAsync(key) 230 | if status == true then 231 | printer('async_get', __threadid, i, key, tostring(value)) 232 | else 233 | printer('async_get', __threadid, i, key, 'blocked') 234 | end 235 | ffi.C.sleep(1) 236 | end 237 | printer('async_get', __threadid, 'exit') 238 | end 239 | end 240 | 241 | function joe:asyncSetJob() 242 | return function (printer, hash) 243 | local ffi = require('ffi') 244 | local math = 
require('math') 245 | local os = require('os') 246 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 247 | math.randomseed(os.time() + 30000 + __threadid * 1000) 248 | for i = 1, 60 do 249 | local key = tostring(math.random(100)) 250 | local value = 20000 + __threadid * 1000 + i 251 | local status, old_value = hash:setAsync(key, value) 252 | if status == true then 253 | printer('async_set', __threadid, i, key, value, old_value) 254 | else 255 | printer('async_set', __threadid, i, key, 'blocked') 256 | end 257 | ffi.C.sleep(1) 258 | end 259 | printer('async_set', __threadid, 'exit') 260 | end 261 | end 262 | 263 | function joe:syncReadJob() 264 | return function (printer, hash) 265 | local ffi = require('ffi') 266 | local math = require('math') 267 | local os = require('os') 268 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 269 | math.randomseed(os.time() + 40000 + __threadid *1000) 270 | for i = 1, 60 do 271 | local key = tostring(math.random(100)) 272 | local value, status = hash:read( 273 | key, function (value) return value, true end) 274 | if status == true then 275 | printer('sync_read', __threadid, i, key, tostring(value)) 276 | else 277 | printer('sync_read', __threadid, i, key, 'blocked') 278 | end 279 | ffi.C.sleep(1) 280 | end 281 | printer('sync_read', __threadid, 'exit') 282 | end 283 | end 284 | 285 | function joe:syncWriteJob() 286 | return function (printer, hash) 287 | local ffi = require('ffi') 288 | local math = require('math') 289 | local os = require('os') 290 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 291 | math.randomseed(os.time() + 30000 + __threadid * 1000) 292 | for i = 1, 60 do 293 | local key = tostring(math.random(100)) 294 | local value = 20000 + __threadid * 1000 + i 295 | local status, value = hash:write( 296 | key, function (old_value) return value end) 297 | if status == true then 298 | printer('sync_write', __threadid, i, key, value) 299 | else 300 | printer('sync_write', __threadid, i, key, 'blocked') 
301 | end 302 | ffi.C.sleep(1) 303 | end 304 | printer('sync_write', __threadid, 'exit') 305 | end 306 | end 307 | 308 | function joe:asyncReadJob() 309 | return function (printer, hash) 310 | local ffi = require('ffi') 311 | local math = require('math') 312 | local os = require('os') 313 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 314 | math.randomseed(os.time() + 40000 + __threadid *1000) 315 | for i = 1, 60 do 316 | local key = tostring(math.random(100)) 317 | local value, status = hash:readAsync( 318 | key, function (value) return value, true end) 319 | if status == true then 320 | printer('async_read', __threadid, i, key, tostring(value)) 321 | else 322 | printer('async_read', __threadid, i, key, 'blocked') 323 | end 324 | ffi.C.sleep(1) 325 | end 326 | printer('async_read', __threadid, 'exit') 327 | end 328 | end 329 | 330 | function joe:asyncWriteJob() 331 | return function (printer, hash) 332 | local ffi = require('ffi') 333 | local math = require('math') 334 | local os = require('os') 335 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 336 | math.randomseed(os.time() + 30000 + __threadid * 1000) 337 | for i = 1, 60 do 338 | local key = tostring(math.random(100)) 339 | local value = 20000 + __threadid * 1000 + i 340 | local status, value = hash:writeAsync( 341 | key, function (old_value) return value end) 342 | if status == true then 343 | printer('async_write', __threadid, i, key, value) 344 | else 345 | printer('async_write', __threadid, i, key, 'blocked') 346 | end 347 | ffi.C.sleep(1) 348 | end 349 | printer('async_write', __threadid, 'exit') 350 | end 351 | end 352 | 353 | function joe:syncIteratorJob() 354 | return function (printer, hash) 355 | local ffi = require('ffi') 356 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 357 | for i = 1, 5 do 358 | for key, value in pairs(hash) do 359 | printer('sync_iterator', __threadid, i, key, value) 360 | end 361 | ffi.C.sleep(12) 362 | end 363 | printer('sync_iterator', __threadid, 
'exit') 364 | end 365 | end 366 | 367 | function joe:asyncIteratorJob() 368 | return function (printer, hash) 369 | local ffi = require('ffi') 370 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 371 | for i = 1, 5 do 372 | local iterator, status = hash:iteratorAsync() 373 | if status == true then 374 | for key, value in iterator do 375 | printer('async_iterator', __threadid, i, key, value) 376 | end 377 | else 378 | printer('async_iterator', __threadid, i, 'blocked') 379 | end 380 | ffi.C.sleep(12) 381 | end 382 | printer('async_iterator', __threadid, 'exit') 383 | end 384 | end 385 | 386 | joe.main() 387 | return joe 388 | -------------------------------------------------------------------------------- /tunnel/vector.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Multi-threaded vector 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local tds = require('tds') 7 | local threads = require('threads') 8 | local torch = require('torch') 9 | 10 | local Atomic = require('tunnel.atomic') 11 | local Counter = require('tunnel.counter') 12 | local Serializer = require('tunnel.serializer') 13 | 14 | tunnel = tunnel or {} 15 | 16 | -- Append an underscore to distinguish between metatable and class name 17 | local Vector_ = torch.class('tunnel.Vector') 18 | 19 | -- Constructor 20 | -- The parameter is just a size hint. The actual size may go beyond it. 21 | -- Size hint is only useful in push and pop functions, where whether to wait for 22 | -- pushing or popping depends on the current size of the vector. 
23 | function Vector_:__init(size_hint) 24 | self.vector = Atomic(tds.Vec()) 25 | self.size_hint = size_hint or math.huge 26 | self.counter = Counter(1) 27 | self.serializer = Serializer() 28 | 29 | -- Mutex and conditions for push and pop functions based on size hint 30 | self.mutex = threads.Mutex() 31 | self.inserted_condition = threads.Condition() 32 | self.removed_condition = threads.Condition() 33 | 34 | -- Lua 5.1 / LuaJIT garbage collection 35 | if newproxy then 36 | self.proxy = newproxy(true) 37 | getmetatable(self.proxy).__gc = function () self:__gc() end 38 | end 39 | 40 | return self 41 | end 42 | 43 | -- Insert an item 44 | function Vector_:insert(...) 45 | local inserted = nil 46 | if select('#', ...) == 1 then 47 | local storage = self.serializer:save(select(1, ...)) 48 | inserted = self.vector:write( 49 | function (vector) 50 | vector:insert(storage:string()) 51 | return true 52 | end) 53 | else 54 | local index, value = select(1, ...), select(2, ...) 55 | local storage = self.serializer:save(value) 56 | inserted = self.vector:write( 57 | function (vector) 58 | -- When index > #vector + 1, tds.Vec.insert will result in 59 | -- segmentation fault. 60 | if index <= #vector + 1 then 61 | vector:insert(index, storage:string()) 62 | return true 63 | end 64 | end) 65 | end 66 | if inserted == true then 67 | self.inserted_condition:signal() 68 | end 69 | return inserted 70 | end 71 | 72 | -- Insert an item asynchronously 73 | function Vector_:insertAsync(...) 74 | local inserted = nil 75 | if select('#', ...) == 1 then 76 | local storage = self.serializer:save(select(1, ...)) 77 | inserted = self.vector:writeAsync( 78 | function (vector) 79 | vector:insert(storage:string()) 80 | return true 81 | end) 82 | else 83 | local index, value = select(1, ...), select(2, ...) 
84 | local storage = self.serializer:save(value) 85 | inserted = self.vector:writeAsync( 86 | function (vector) 87 | -- When index > #vector + 1, tds.Vec.insert will result in 88 | -- segmentation fault. 89 | if index <= #vector + 1 then 90 | vector:insert(index, storage:string()) 91 | return true 92 | end 93 | end) 94 | end 95 | if inserted == true then 96 | self.inserted_condition:signal() 97 | end 98 | return inserted 99 | end 100 | 101 | -- Remove an item 102 | function Vector_:remove(index) 103 | local storage_string = self.vector:write( 104 | function (vector) 105 | local index = index or #vector 106 | local storage_string = vector[index] 107 | vector:remove(index) 108 | return storage_string 109 | end) 110 | if storage_string then 111 | local storage = torch.CharStorage():string(storage_string) 112 | self.removed_condition:signal() 113 | return self.serializer:load(storage), true 114 | end 115 | end 116 | 117 | -- Remove an item asynchronously 118 | function Vector_:removeAsync(index) 119 | local storage_string = self.vector:writeAsync( 120 | function (vector) 121 | local index = index or #vector 122 | local storage_string = vector[index] 123 | vector:remove(index) 124 | return storage_string 125 | end) 126 | if storage_string then 127 | local storage = torch.CharStorage():string(storage_string) 128 | self.removed_condition:signal() 129 | return self.serializer:load(storage), true 130 | end 131 | end 132 | 133 | -- Push the item at the front 134 | -- The function will wait until the vector is smaller than self.size_hint. 135 | -- Note that there is no guarantee that after insertion the vector size will 136 | -- be smaller than or equal to self.size_hint. 
137 | function Vector_:pushFront(value) 138 | while self:size() >= self.size_hint do 139 | self.mutex:lock() 140 | self.removed_condition:wait(self.mutex) 141 | self.mutex:unlock() 142 | end 143 | return self:insert(1, value) 144 | end 145 | 146 | -- Push the item at the front asynchronously 147 | -- If vector is larger than self.size_hint or there are other threads accessing 148 | -- it, return immediately. 149 | function Vector_:pushFrontAsync(value) 150 | local size = self:sizeAsync() 151 | if size and size < self.size_hint then 152 | return self:insertAsync(1, value) 153 | end 154 | end 155 | 156 | -- Pop the item at the front 157 | -- The function will wait until the vector has at least one item. 158 | function Vector_:popFront() 159 | while self:size() < 1 do 160 | self.mutex:lock() 161 | self.inserted_condition:wait(self.mutex) 162 | self.mutex:unlock() 163 | end 164 | local value, removed = self:remove(1) 165 | 166 | while removed ~= true do 167 | while self:size() < 1 do 168 | self.mutex:lock() 169 | self.inserted_condition:wait(self.mutex) 170 | self.mutex:unlock() 171 | end 172 | value, removed = self:remove(1) 173 | end 174 | 175 | return value, removed 176 | end 177 | 178 | -- Pop the item at the front asynchronously 179 | -- If vector is smaller than 1 or there are other threads accessing it, return 180 | -- immediately. 181 | function Vector_:popFrontAsync() 182 | local size = self:sizeAsync() 183 | if size and size > 0 then 184 | return self:removeAsync(1) 185 | end 186 | end 187 | 188 | -- Push the item at the back 189 | -- The function will wait until the vector is smaller than self.size_hint. 190 | -- Note that there is no guarantee that after insertion the vector size will 191 | -- be smaller than or equal to self.size_hint. 
function Vector_:pushBack(value)
   -- Wait until the vector is smaller than self.size_hint. There is no
   -- guarantee that after insertion the size is still <= self.size_hint.
   while self:size() >= self.size_hint do
      self.mutex:lock()
      self.removed_condition:wait(self.mutex)
      self.mutex:unlock()
   end
   return self:insert(value)
end

-- Push the item at the back asynchronously
-- If vector is larger than self.size_hint or there are other threads
-- accessing it, return immediately.
function Vector_:pushBackAsync(value)
   local size = self:sizeAsync()
   if size and size < self.size_hint then
      return self:insertAsync(value)
   end
end

-- Pop the item at the back
-- The function will wait until the vector has at least one item and the
-- removal succeeds (another consumer may win the race after the wakeup).
function Vector_:popBack()
   while self:size() < 1 do
      self.mutex:lock()
      self.inserted_condition:wait(self.mutex)
      self.mutex:unlock()
   end
   local value, removed = self:remove()

   while removed ~= true do
      while self:size() < 1 do
         self.mutex:lock()
         self.inserted_condition:wait(self.mutex)
         self.mutex:unlock()
      end
      value, removed = self:remove()
   end

   return value, removed
end

-- Pop the item at the back asynchronously
-- If vector is smaller than 1 or there are other threads accessing it,
-- return immediately.
function Vector_:popBackAsync()
   local size = self:sizeAsync()
   if size and size > 0 then
      return self:removeAsync()
   end
end

-- Get the item at index without removing it.
-- Returns value, status. Uses serializer:retain so that shared data stays
-- alive for other readers of the same slot.
function Vector_:get(index)
   local storage_string, status = self.vector:read(
      function (vector)
         return vector[index], true
      end)
   if storage_string then
      local storage = torch.CharStorage():string(storage_string)
      return self.serializer:retain(storage), status
   else
      return nil, status
   end
end

-- Get the item asynchronously. Returns nil, nil when the vector is busy.
function Vector_:getAsync(index)
   local storage_string, status = self.vector:readAsync(
      function (vector)
         return vector[index], true
      end)
   if storage_string then
      local storage = torch.CharStorage():string(storage_string)
      return self.serializer:retain(storage), status
   else
      return nil, status
   end
end

-- Set the item at index, extending the vector with serialized nils when
-- index is past the end. Returns status, old_value.
function Vector_:set(index, value)
   local storage = self.serializer:save(value)
   local count, old_value = self.vector:write(
      function (vector)
         local count = 0
         local old_value
         -- Capture the size before extension: the original computed
         -- `index - #vector` after the fill loop had already grown the
         -- vector to `index`, so count was always 0 and inserted_condition
         -- was never signaled for the filler elements.
         local old_size = #vector
         if index > old_size then
            local nil_string = self.serializer:save(nil):string()
            for i = old_size + 1, index do
               vector[i] = nil_string
            end
            count = index - old_size
         else
            -- Load serialized data to remove dangling values
            old_value = self.serializer:load(
               torch.CharStorage():string(vector[index]))
         end
         vector[index] = storage:string()
         return count, old_value
      end)
   if count == nil then
      return nil, old_value
   else
      for i = 1, count do
         self.inserted_condition:signal()
      end
      return true, old_value
   end
end

-- Set the item asynchronously. Returns nil, nil when the vector is busy.
function Vector_:setAsync(index, value)
   local storage = self.serializer:save(value)
   local count, old_value = self.vector:writeAsync(
      function (vector)
         local count = 0
         local old_value
         -- See Vector_:set — size must be captured before the fill loop.
         local old_size = #vector
         if index > old_size then
            local nil_string = self.serializer:save(nil):string()
            for i = old_size + 1, index do
               vector[i] = nil_string
            end
            count = index - old_size
         else
            -- Load serialized data to remove dangling values
            old_value = self.serializer:load(
               torch.CharStorage():string(vector[index]))
         end
         vector[index] = storage:string()
         return count, old_value
      end)
   if count == nil then
      return nil, old_value
   else
      for i = 1, count do
         self.inserted_condition:signal()
      end
      return true, old_value
   end
end

-- Read the item at index synchronously, passing the deserialized value
-- (or nil when the slot is empty) to callback inside the read lock.
function Vector_:read(index, callback)
   return self.vector:read(
      function (vector)
         local value
         local storage_string = vector[index]
         if storage_string then
            local storage = torch.CharStorage():string(storage_string)
            value = self.serializer:retain(storage)
         end
         return callback(value)
      end)
end

-- Read asynchronously. Returns nothing when the vector is busy.
function Vector_:readAsync(index, callback)
   return self.vector:readAsync(
      function (vector)
         local value
         local storage_string = vector[index]
         if storage_string then
            local storage = torch.CharStorage():string(storage_string)
            value = self.serializer:retain(storage)
         end
         return callback(value)
      end)
end

-- Write the item at index synchronously: callback receives the current
-- value (nil when absent) and its return value is stored back.
-- Returns status, new_value.
function Vector_:write(index, callback)
   local count, new_value = self.vector:write(
      function (vector)
         local value
         local storage_string = vector[index]
         if storage_string then
            local storage = torch.CharStorage():string(storage_string)
            value = self.serializer:load(storage)
         end
         local new_value = callback(value)
         local new_storage = self.serializer:save(new_value)
         local count = 0
         -- See Vector_:set — size must be captured before the fill loop.
         local old_size = #vector
         if index > old_size then
            local nil_string = self.serializer:save(nil):string()
            for i = old_size + 1, index do
               vector[i] = nil_string
            end
            count = index - old_size
         end
         vector[index] = new_storage:string()
         return count, new_value
      end)

   if count == nil then
      return nil, nil
   else
      for i = 1, count do
         self.inserted_condition:signal()
      end
      return true, new_value
   end
end

-- Write asynchronously. Returns nil, nil when the vector is busy.
function Vector_:writeAsync(index, callback)
   local count, new_value = self.vector:writeAsync(
      function (vector)
         local value
         local storage_string = vector[index]
         if storage_string then
            local storage = torch.CharStorage():string(storage_string)
            value = self.serializer:load(storage)
         end
         local new_value = callback(value)
         local new_storage = self.serializer:save(new_value)
         local count = 0
         -- See Vector_:set — size must be captured before the fill loop.
         local old_size = #vector
         if index > old_size then
            local nil_string = self.serializer:save(nil):string()
            for i = old_size + 1, index do
               vector[i] = nil_string
            end
            count = index - old_size
         end
         vector[index] = new_storage:string()
         return count, new_value
      end)

   if count == nil then
      return nil, nil
   else
      for i = 1, count do
         self.inserted_condition:signal()
      end
      return true, new_value
   end
end

-- Get the size of the vector
function Vector_:size()
   return self.vector:read(
      function (vector)
         return #vector
      end)
end

-- Get the size of the vector asynchronously. Returns nothing when busy.
function Vector_:sizeAsync()
   return self.vector:readAsync(
      function (vector)
         return #vector
      end)
end

-- Sort the vector using compare on the deserialized values.
-- compare must implement a strict less-than ordering.
function Vector_:sort(compare)
   return self.vector:write(
      function (vector)
         vector:sort(
            function (a, b)
               return compare(
                  self.serializer:retain(torch.CharStorage():string(a)),
                  self.serializer:retain(torch.CharStorage():string(b)))
            end)
         return true
      end)
end

-- Sort the vector asynchronously. Returns nothing when busy.
function Vector_:sortAsync(compare)
   return self.vector:writeAsync(
      function (vector)
         vector:sort(
            function (a, b)
               return compare(
                  self.serializer:retain(torch.CharStorage():string(a)),
                  self.serializer:retain(torch.CharStorage():string(b)))
            end)
         return true
      end)
end

-- Iterate through all the items.
-- Snapshots the vector under the read lock, then iterates over the clone so
-- the caller never holds the lock during iteration. Returns iterator, status.
function Vector_:iterator()
   local clone = self.vector:read(
      function (vector)
         local clone = tds.Vec()
         for index, value in ipairs(vector) do
            clone[index] =
               self.serializer:save(
                  self.serializer:retain(
                     torch.CharStorage():string(value))):string()
         end
         return clone
      end)
   if clone then
      local index = 0
      return function ()
         index = index + 1
         if index <= #clone then
            return index, self.serializer:load(
               torch.CharStorage():string(clone[index]))
         end
      end, true
   else
      return function() end, false
   end
end

-- Iterate through all the items asynchronously.
-- Returns an empty iterator and false when the vector is busy.
function Vector_:iteratorAsync()
   local clone = self.vector:readAsync(
      function (vector)
         local clone = tds.Vec()
         for index, value in ipairs(vector) do
            clone[index] =
               self.serializer:save(
                  self.serializer:retain(
                     torch.CharStorage():string(value))):string()
         end
         return clone
      end)
   if clone then
      local index = 0
      return function ()
         index = index + 1
         if index <= #clone then
            return index, self.serializer:load(
               torch.CharStorage():string(clone[index]))
         end
      end, true
   else
      return function() end, false
   end
end

-- Convert to string
function Vector_:toString()
   return self.vector:read(
      function (vector)
         return tostring(vector)
      end)
end

-- Convert to string asynchronously. Returns nothing when busy.
function Vector_:toStringAsync()
   return self.vector:readAsync(
      function (vector)
         return tostring(vector)
      end)
end

-- Free the resources allocated by vector
function Vector_:free()
   self.mutex:free()
   self.inserted_condition:free()
   self.removed_condition:free()

   -- Clean up the memory when the last reference goes away.
   local value = self.counter:decrease()
   if value == 0 then
      -- Cleaning up dangling data: deserializing each stored string releases
      -- any shared data it references.
      self.vector:write(
         function (vector)
            for index, value in ipairs(vector) do
               -- Fix: the field is `serializer` (the original read the
               -- non-existent `self.serialize` and would error here), and
               -- load expects a CharStorage, not the raw string, matching
               -- every other call site in this file.
               self.serializer:load(torch.CharStorage():string(value))
            end
         end)
   end
end

-- The index operator: numeric indices read items, strings look up methods.
function Vector_:__index(index)
   if type(index) == 'number' then
      return self:get(index)
   else
      local method = Vector_[index]
      if method then
         return method
      else
         error('Invalid index (number) or method name')
      end
   end
end

-- The new index operator: numeric indices set items, other keys are raw.
function Vector_:__newindex(index, value)
   if type(index) == 'number' then
      return self:set(index, value)
   else
      rawset(self, index, value)
   end
end

-- Array iterator operator
function Vector_:__ipairs()
   return self:iterator()
end

-- Table iterator operator
function Vector_:__pairs()
   return self:__ipairs()
end

-- Length of the vector
function Vector_:__len()
   return self:size()
end

-- This works for Lua 5.2. For Lua 5.1 / LuaJIT we depend on self.proxy.
601 | function Vector_:__gc() 602 | self:free() 603 | end 604 | 605 | -- To string 606 | function Vector_:__tostring() 607 | return self:toString() 608 | end 609 | 610 | -- Serialization of this object 611 | function Vector_:__write(f) 612 | self.counter:increase(1) 613 | f:writeObject(self.vector) 614 | f:writeObject(self.size_hint) 615 | f:writeObject(self.counter) 616 | 617 | local mutex = threads.Mutex(self.mutex:id()) 618 | f:writeObject(self.mutex:id()) 619 | local inserted_condition = threads.Condition(self.inserted_condition:id()) 620 | f:writeObject(self.inserted_condition:id()) 621 | local removed_condition = threads.Condition(self.removed_condition:id()) 622 | f:writeObject(self.removed_condition:id()) 623 | end 624 | 625 | -- Deserialization of this object 626 | function Vector_:__read(f) 627 | self.serializer = Serializer() 628 | 629 | self.vector = f:readObject() 630 | self.size_hint = f:readObject() 631 | self.counter = f:readObject() 632 | 633 | self.mutex = threads.Mutex(f:readObject()) 634 | self.mutex:free() 635 | self.inserted_condition = threads.Condition(f:readObject()) 636 | self.inserted_condition:free() 637 | self.removed_condition = threads.Condition(f:readObject()) 638 | self.removed_condition:free() 639 | 640 | -- Lua 5.1 / LuaJIT garbage collection 641 | if newproxy then 642 | self.proxy = newproxy(true) 643 | getmetatable(self.proxy).__gc = function () self:__gc() end 644 | end 645 | end 646 | 647 | -- Return the class, not the metatable 648 | return tunnel.Vector 649 | -------------------------------------------------------------------------------- /unittest/vector.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | Unittest for vector 3 | Copyright 2016 Xiang Zhang 4 | --]] 5 | 6 | local Vector = require('tunnel.vector') 7 | 8 | local threads = require('threads') 9 | local torch = require('torch') 10 | 11 | -- A Logic Named Joe 12 | local joe = {} 13 | 14 | function joe.main() 15 | if 
joe.init then 16 | print('Initializing testing environment') 17 | joe:init() 18 | end 19 | for name, func in pairs(joe) do 20 | if type(name) == 'string' and type(func) == 'function' 21 | and name:match('[%g]+Test') then 22 | print('\nExecuting '..name) 23 | func(joe) 24 | end 25 | end 26 | end 27 | 28 | function joe:init() 29 | local vector = Vector(5) 30 | self.vector = vector 31 | self.print_mutex = threads.Mutex() 32 | end 33 | 34 | function joe:insertRemoveTest() 35 | -- 3 synchronous insert threads 36 | local sync_insert_block = threads.Threads(3, self:threadInit()) 37 | sync_insert_block:specific(true) 38 | for i = 1, 3 do 39 | local job = self:syncInsertJob() 40 | sync_insert_block:addjob(i, job) 41 | end 42 | 43 | -- 2 synchronous remove threads 44 | local sync_remove_block = threads.Threads(2, self:threadInit()) 45 | sync_remove_block:specific(true) 46 | for i = 1, 2 do 47 | local job = self:syncRemoveJob() 48 | sync_remove_block:addjob(i, job) 49 | end 50 | 51 | -- 3 asynchronous insert threads 52 | local async_insert_block = threads.Threads(3, self:threadInit()) 53 | async_insert_block:specific(true) 54 | for i = 1, 3 do 55 | local job = self:asyncInsertJob() 56 | async_insert_block:addjob(i, job) 57 | end 58 | 59 | -- 2 asynchronous remove threads 60 | local async_remove_block = threads.Threads(2, self:threadInit()) 61 | async_remove_block:specific(true) 62 | for i = 1, 2 do 63 | local job = self:asyncRemoveJob() 64 | async_remove_block:addjob(i, job) 65 | end 66 | 67 | -- 1 synchronous iterator thread 68 | local sync_iterator_block = threads.Threads(1, self:threadInit()) 69 | sync_iterator_block:specific(true) 70 | for i = 1, 1 do 71 | local job = self:syncIteratorJob() 72 | sync_iterator_block:addjob(i, job) 73 | end 74 | 75 | -- 1 asynchronous iterator thread 76 | local async_iterator_block = threads.Threads(1, self:threadInit()) 77 | async_iterator_block:specific(true) 78 | for i = 1, 1 do 79 | local job = self:asyncIteratorJob() 80 | 
async_iterator_block:addjob(i, job) 81 | end 82 | 83 | -- 1 synchronous tostring thread 84 | local sync_tostring_block = threads.Threads(1, self:threadInit()) 85 | sync_tostring_block:specific(true) 86 | for i = 1, 1 do 87 | local job = self:syncToStringJob() 88 | sync_tostring_block:addjob(i, job) 89 | end 90 | 91 | -- 1 Asynchronous tostring thread 92 | local async_tostring_block = threads.Threads(1, self:threadInit()) 93 | async_tostring_block:specific(true) 94 | for i = 1, 1 do 95 | local job = self:asyncToStringJob() 96 | async_tostring_block:addjob(i, job) 97 | end 98 | 99 | sync_insert_block:terminate() 100 | sync_remove_block:terminate() 101 | async_insert_block:terminate() 102 | async_remove_block:terminate() 103 | sync_iterator_block:terminate() 104 | async_iterator_block:terminate() 105 | sync_tostring_block:terminate() 106 | async_tostring_block:terminate() 107 | end 108 | 109 | function joe:pushPopTest() 110 | -- 3 synchronous pushFront threads 111 | local sync_pushfront_block = threads.Threads(3, self:threadInit()) 112 | sync_pushfront_block:specific(true) 113 | for i = 1, 3 do 114 | local job = self:syncPushFrontJob() 115 | sync_pushfront_block:addjob(i, job) 116 | end 117 | 118 | -- 2 synchronous popFront threads 119 | local sync_popfront_block = threads.Threads(2, self:threadInit()) 120 | sync_popfront_block:specific(true) 121 | for i = 1, 2 do 122 | local job = self:syncPopFrontJob() 123 | sync_popfront_block:addjob(i, job) 124 | end 125 | 126 | -- 3 asynchronous pushFront threads 127 | local async_pushfront_block = threads.Threads(3, self:threadInit()) 128 | async_pushfront_block:specific(true) 129 | for i = 1, 3 do 130 | local job = self:asyncPushFrontJob() 131 | async_pushfront_block:addjob(i, job) 132 | end 133 | 134 | -- 2 asynchronous popFront threads 135 | local async_popfront_block = threads.Threads(2, self:threadInit()) 136 | async_popfront_block:specific(true) 137 | for i = 1, 2 do 138 | local job = self:asyncPopFrontJob() 139 | 
async_popfront_block:addjob(i, job) 140 | end 141 | 142 | -- 3 synchronous pushBack threads 143 | local sync_pushback_block = threads.Threads(3, self:threadInit()) 144 | sync_pushback_block:specific(true) 145 | for i = 1, 3 do 146 | local job = self:syncPushBackJob() 147 | sync_pushback_block:addjob(i, job) 148 | end 149 | 150 | -- 2 synchronous popBack threads 151 | local sync_popback_block = threads.Threads(2, self:threadInit()) 152 | sync_popback_block:specific(true) 153 | for i = 1, 2 do 154 | local job = self:syncPopBackJob() 155 | sync_popback_block:addjob(i, job) 156 | end 157 | 158 | -- 3 asynchronous pushBack threads 159 | local async_pushback_block = threads.Threads(3, self:threadInit()) 160 | async_pushback_block:specific(true) 161 | for i = 1, 3 do 162 | local job = self:asyncPushBackJob() 163 | async_pushback_block:addjob(i, job) 164 | end 165 | 166 | -- 2 asynchronous popBack threads 167 | local async_popback_block = threads.Threads(2, self:threadInit()) 168 | async_popback_block:specific(true) 169 | for i = 1, 2 do 170 | local job = self:asyncPopBackJob() 171 | async_popback_block:addjob(i, job) 172 | end 173 | 174 | -- 1 synchronous iterator thread 175 | local sync_iterator_block = threads.Threads(1, self:threadInit()) 176 | sync_iterator_block:specific(true) 177 | for i = 1, 1 do 178 | local job = self:syncIteratorJob() 179 | sync_iterator_block:addjob(i, job) 180 | end 181 | 182 | -- 1 asynchronous iterator thread 183 | local async_iterator_block = threads.Threads(1, self:threadInit()) 184 | async_iterator_block:specific(true) 185 | for i = 1, 1 do 186 | local job = self:asyncIteratorJob() 187 | async_iterator_block:addjob(i, job) 188 | end 189 | 190 | -- 1 synchronous tostring thread 191 | local sync_tostring_block = threads.Threads(1, self:threadInit()) 192 | sync_tostring_block:specific(true) 193 | for i = 1, 1 do 194 | local job = self:syncToStringJob() 195 | sync_tostring_block:addjob(i, job) 196 | end 197 | 198 | -- 1 Asynchronous tostring 
thread 199 | local async_tostring_block = threads.Threads(1, self:threadInit()) 200 | async_tostring_block:specific(true) 201 | for i = 1, 1 do 202 | local job = self:asyncToStringJob() 203 | async_tostring_block:addjob(i, job) 204 | end 205 | 206 | sync_pushfront_block:terminate() 207 | sync_popfront_block:terminate() 208 | async_pushfront_block:terminate() 209 | async_popfront_block:terminate() 210 | sync_pushback_block:terminate() 211 | sync_popback_block:terminate() 212 | async_pushback_block:terminate() 213 | async_popback_block:terminate() 214 | sync_iterator_block:terminate() 215 | async_iterator_block:terminate() 216 | sync_tostring_block:terminate() 217 | async_tostring_block:terminate() 218 | end 219 | 220 | function joe:getSetTest() 221 | -- 3 synchronous insert threads 222 | local sync_insert_block = threads.Threads(3, self:threadInit()) 223 | sync_insert_block:specific(true) 224 | for i = 1, 3 do 225 | local job = self:syncInsertJob() 226 | sync_insert_block:addjob(i, job) 227 | end 228 | 229 | -- 2 synchronous get threads 230 | local sync_get_block = threads.Threads(2, self:threadInit()) 231 | sync_get_block:specific(true) 232 | for i = 1, 2 do 233 | local job = self:syncGetJob() 234 | sync_get_block:addjob(i, job) 235 | end 236 | 237 | -- 2 synchronous set threads 238 | local sync_set_block = threads.Threads(2, self:threadInit()) 239 | sync_set_block:specific(true) 240 | for i = 1, 2 do 241 | local job = self:syncSetJob() 242 | sync_set_block:addjob(i, job) 243 | end 244 | 245 | -- 2 asynchronous get threads 246 | local async_get_block = threads.Threads(2, self:threadInit()) 247 | async_get_block:specific(true) 248 | for i = 1, 2 do 249 | local job = self:asyncGetJob() 250 | async_get_block:addjob(i, job) 251 | end 252 | 253 | -- 2 asynchronous set threads 254 | local async_set_block = threads.Threads(2, self:threadInit()) 255 | async_set_block:specific(true) 256 | for i = 1, 2 do 257 | local job = self:asyncSetJob() 258 | async_set_block:addjob(i, 
job) 259 | end 260 | 261 | -- 1 synchronous sort thread 262 | local sync_sort_block = threads.Threads(1, self:threadInit()) 263 | sync_sort_block:specific(true) 264 | for i = 1, 1 do 265 | local job = self:syncSortJob() 266 | sync_sort_block:addjob(i, job) 267 | end 268 | 269 | -- 1 synchronous sort thread 270 | local async_sort_block = threads.Threads(1, self:threadInit()) 271 | async_sort_block:specific(true) 272 | for i = 1, 1 do 273 | local job = self:asyncSortJob() 274 | async_sort_block:addjob(i, job) 275 | end 276 | 277 | -- 1 synchronous iterator thread 278 | local sync_iterator_block = threads.Threads(1, self:threadInit()) 279 | sync_iterator_block:specific(true) 280 | for i = 1, 1 do 281 | local job = self:syncIteratorJob() 282 | sync_iterator_block:addjob(i, job) 283 | end 284 | 285 | -- 1 asynchronous iterator thread 286 | local async_iterator_block = threads.Threads(1, self:threadInit()) 287 | async_iterator_block:specific(true) 288 | for i = 1, 1 do 289 | local job = self:asyncIteratorJob() 290 | async_iterator_block:addjob(i, job) 291 | end 292 | 293 | sync_insert_block:terminate() 294 | sync_get_block:terminate() 295 | sync_set_block:terminate() 296 | async_get_block:terminate() 297 | async_set_block:terminate() 298 | sync_sort_block:terminate() 299 | async_sort_block:terminate() 300 | sync_iterator_block:terminate() 301 | async_iterator_block:terminate() 302 | end 303 | 304 | function joe:readWriteTest() 305 | -- 3 synchronous insert threads 306 | local sync_insert_block = threads.Threads(3, self:threadInit()) 307 | sync_insert_block:specific(true) 308 | for i = 1, 3 do 309 | local job = self:syncInsertJob() 310 | sync_insert_block:addjob(i, job) 311 | end 312 | 313 | -- 2 synchronous read threads 314 | local sync_read_block = threads.Threads(2, self:threadInit()) 315 | sync_read_block:specific(true) 316 | for i = 1, 2 do 317 | local job = self:syncReadJob() 318 | sync_read_block:addjob(i, job) 319 | end 320 | 321 | -- 2 synchronous write threads 
322 | local sync_write_block = threads.Threads(2, self:threadInit()) 323 | sync_write_block:specific(true) 324 | for i = 1, 2 do 325 | local job = self:syncWriteJob() 326 | sync_write_block:addjob(i, job) 327 | end 328 | 329 | -- 2 asynchronous read threads 330 | local async_read_block = threads.Threads(2, self:threadInit()) 331 | async_read_block:specific(true) 332 | for i = 1, 2 do 333 | local job = self:asyncReadJob() 334 | async_read_block:addjob(i, job) 335 | end 336 | 337 | -- 2 asynchronous write threads 338 | local async_write_block = threads.Threads(2, self:threadInit()) 339 | async_write_block:specific(true) 340 | for i = 1, 2 do 341 | local job = self:asyncWriteJob() 342 | async_write_block:addjob(i, job) 343 | end 344 | 345 | -- 1 synchronous sort thread 346 | local sync_sort_block = threads.Threads(1, self:threadInit()) 347 | sync_sort_block:specific(true) 348 | for i = 1, 1 do 349 | local job = self:syncSortJob() 350 | sync_sort_block:addjob(i, job) 351 | end 352 | 353 | -- 1 synchronous sort thread 354 | local async_sort_block = threads.Threads(1, self:threadInit()) 355 | async_sort_block:specific(true) 356 | for i = 1, 1 do 357 | local job = self:asyncSortJob() 358 | async_sort_block:addjob(i, job) 359 | end 360 | 361 | -- 1 synchronous iterator thread 362 | local sync_iterator_block = threads.Threads(1, self:threadInit()) 363 | sync_iterator_block:specific(true) 364 | for i = 1, 1 do 365 | local job = self:syncIteratorJob() 366 | sync_iterator_block:addjob(i, job) 367 | end 368 | 369 | -- 1 asynchronous iterator thread 370 | local async_iterator_block = threads.Threads(1, self:threadInit()) 371 | async_iterator_block:specific(true) 372 | for i = 1, 1 do 373 | local job = self:asyncIteratorJob() 374 | async_iterator_block:addjob(i, job) 375 | end 376 | 377 | sync_insert_block:terminate() 378 | sync_read_block:terminate() 379 | sync_write_block:terminate() 380 | async_read_block:terminate() 381 | async_write_block:terminate() 382 | 
sync_sort_block:terminate() 383 | async_sort_block:terminate() 384 | sync_iterator_block:terminate() 385 | async_iterator_block:terminate() 386 | end 387 | 388 | function joe:threadInit() 389 | return function() 390 | local torch = require('torch') 391 | local Vector = require('tunnel.vector') 392 | end 393 | end 394 | 395 | function joe:syncInsertJob() 396 | local print_mutex_id = self.print_mutex:id() 397 | local vector = self.vector 398 | return function() 399 | local ffi = require('ffi') 400 | local math = require('math') 401 | local os = require('os') 402 | local threads = require('threads') 403 | 404 | math.randomseed(os.time() + __threadid) 405 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 406 | local print_mutex = threads.Mutex(print_mutex_id) 407 | for i = 1, 10 do 408 | local index = math.random(3) 409 | local value = 10000 + __threadid * 1000 + i 410 | local status = vector:insert(index, value) 411 | if status == true then 412 | print_mutex:lock() 413 | print('sync_insert', __threadid, i, index, value) 414 | print_mutex:unlock() 415 | else 416 | print_mutex:lock() 417 | print('sync_insert', __threadid, i, index, value, 'blocked') 418 | print_mutex:unlock() 419 | end 420 | ffi.C.sleep(6) 421 | end 422 | end 423 | end 424 | 425 | function joe:syncRemoveJob() 426 | local print_mutex_id = self.print_mutex:id() 427 | local vector = self.vector 428 | return function() 429 | local ffi = require('ffi') 430 | local math = require('math') 431 | local os = require('os') 432 | local threads = require('threads') 433 | 434 | math.randomseed(os.time() + __threadid) 435 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 436 | local print_mutex = threads.Mutex(print_mutex_id) 437 | for i = 1, 20 do 438 | local index = math.random(10) 439 | local value, status = vector:remove(index) 440 | if status == true then 441 | print_mutex:lock() 442 | print('sync_remove', __threadid, i, index, value) 443 | print_mutex:unlock() 444 | else 445 | print_mutex:lock() 446 
| print('sync_remove', __threadid, i, index, value, 'blocked') 447 | print_mutex:unlock() 448 | end 449 | ffi.C.sleep(3) 450 | end 451 | end 452 | end 453 | 454 | function joe:asyncInsertJob() 455 | local print_mutex_id = self.print_mutex:id() 456 | local vector = self.vector 457 | return function() 458 | local ffi = require('ffi') 459 | local math = require('math') 460 | local os = require('os') 461 | local threads = require('threads') 462 | 463 | math.randomseed(os.time() + __threadid) 464 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 465 | local print_mutex = threads.Mutex(print_mutex_id) 466 | for i = 1, 10 do 467 | local index = math.random(3) 468 | local value = 20000 + __threadid * 1000 + i 469 | local status = vector:insertAsync(index, value) 470 | if status == true then 471 | print_mutex:lock() 472 | print('async_insert', __threadid, i, index, value) 473 | print_mutex:unlock() 474 | else 475 | print_mutex:lock() 476 | print('async_insert', __threadid, i, index, value, 'blocked') 477 | print_mutex:unlock() 478 | end 479 | ffi.C.sleep(6) 480 | end 481 | end 482 | end 483 | 484 | function joe:asyncRemoveJob() 485 | local print_mutex_id = self.print_mutex:id() 486 | local vector = self.vector 487 | return function() 488 | local ffi = require('ffi') 489 | local math = require('math') 490 | local os = require('os') 491 | local threads = require('threads') 492 | 493 | math.randomseed(os.time() + __threadid) 494 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 495 | local print_mutex = threads.Mutex(print_mutex_id) 496 | for i = 1, 20 do 497 | local index = math.random(10) 498 | local value, status = vector:removeAsync(index) 499 | if status == true then 500 | print_mutex:lock() 501 | print('async_remove', __threadid, i, index, value) 502 | print_mutex:unlock() 503 | else 504 | print_mutex:lock() 505 | print('async_remove', __threadid, i, index, value, 'blocked') 506 | print_mutex:unlock() 507 | end 508 | ffi.C.sleep(3) 509 | end 510 | end 511 | 
end 512 | 513 | function joe:syncIteratorJob() 514 | local print_mutex_id = self.print_mutex:id() 515 | local vector = self.vector 516 | return function() 517 | local ffi = require('ffi') 518 | local io = require('io') 519 | local math = require('math') 520 | local os = require('os') 521 | local threads = require('threads') 522 | 523 | math.randomseed(os.time() + __threadid) 524 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 525 | local print_mutex = threads.Mutex(print_mutex_id) 526 | for i = 1, 30 do 527 | local status = true 528 | local iterator = pairs(vector) 529 | if status == true then 530 | print_mutex:lock() 531 | io.write('sync_iterator', '\t', __threadid, '\t', i, '\t{') 532 | for index, value in iterator do 533 | io.write(index, ':', tostring(value), ',') 534 | end 535 | io.write('}\n') 536 | io.flush() 537 | print_mutex:unlock() 538 | else 539 | print_mutex:lock() 540 | print('sync_iterator', __threadid, i, 'blocked') 541 | print_mutex:unlock() 542 | end 543 | ffi.C.sleep(2) 544 | end 545 | end 546 | end 547 | 548 | function joe:asyncIteratorJob() 549 | local print_mutex_id = self.print_mutex:id() 550 | local vector = self.vector 551 | return function() 552 | local ffi = require('ffi') 553 | local io = require('io') 554 | local math = require('math') 555 | local os = require('os') 556 | local threads = require('threads') 557 | 558 | math.randomseed(os.time() + __threadid) 559 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 560 | local print_mutex = threads.Mutex(print_mutex_id) 561 | for i = 1, 20 do 562 | local iterator, status = vector:iteratorAsync() 563 | if status == true then 564 | print_mutex:lock() 565 | io.write('async_iterator', '\t', __threadid, '\t', i, '\t{') 566 | for index, value in iterator do 567 | io.write(index, ':', tostring(value), ',') 568 | end 569 | io.write('}\n') 570 | io.flush() 571 | print_mutex:unlock() 572 | else 573 | print_mutex:lock() 574 | print('async_iterator', __threadid, i, 'blocked') 575 | 
print_mutex:unlock() 576 | end 577 | ffi.C.sleep(3) 578 | end 579 | end 580 | end 581 | 582 | function joe:syncToStringJob() 583 | local print_mutex_id = self.print_mutex:id() 584 | local vector = self.vector 585 | return function() 586 | local ffi = require('ffi') 587 | local math = require('math') 588 | local os = require('os') 589 | local threads = require('threads') 590 | 591 | math.randomseed(os.time() + __threadid) 592 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 593 | local print_mutex = threads.Mutex(print_mutex_id) 594 | for i = 1, 12 do 595 | local vector_string = tostring(vector) 596 | if vector_string ~= nil then 597 | print_mutex:lock() 598 | print('sync_tostring', __threadid, i, 599 | vector_string:gsub('\n', ','):gsub(' ','')) 600 | print_mutex:unlock() 601 | else 602 | print_mutex:lock() 603 | print('sync_tostring', __threadid, i, 'blocked') 604 | print_mutex:unlock() 605 | end 606 | ffi.C.sleep(5) 607 | end 608 | end 609 | end 610 | 611 | function joe:asyncToStringJob() 612 | local print_mutex_id = self.print_mutex:id() 613 | local vector = self.vector 614 | return function() 615 | local ffi = require('ffi') 616 | local math = require('math') 617 | local os = require('os') 618 | local threads = require('threads') 619 | 620 | math.randomseed(os.time() + __threadid) 621 | ffi.cdef('unsigned int sleep(unsigned int seconds);') 622 | local print_mutex = threads.Mutex(print_mutex_id) 623 | for i = 1, 12 do 624 | local vector_string = vector:toStringAsync() 625 | if vector_string ~= nil then 626 | print_mutex:lock() 627 | print('async_tostring', __threadid, i, 628 | vector_string:gsub('\n', ','):gsub(' ','')) 629 | print_mutex:unlock() 630 | else 631 | print_mutex:lock() 632 | print('async_tostring', __threadid, i, 'blocked') 633 | print_mutex:unlock() 634 | end 635 | ffi.C.sleep(5) 636 | end 637 | end 638 | end 639 | 640 | function joe:syncPushFrontJob() 641 | local print_mutex_id = self.print_mutex:id() 642 | local vector = self.vector 643 | 
-- NOTE(review): this chunk opens inside joe:syncPushFrontJob() -- the
-- function header (which binds the upvalues print_mutex_id and vector)
-- lies above this chunk. The first `return function()` below is that
-- job's thread closure, reproduced unchanged.

-- Thread closure for the synchronous pushFront job: pushes 30
-- thread-unique values (30000 + id * 1000 + i) to the front of the
-- shared vector, logging success or 'blocked' under the print mutex.
return function()
   local ffi = require('ffi')
   local math = require('math')
   local os = require('os')
   local threads = require('threads')

   math.randomseed(os.time() + __threadid)
   ffi.cdef('unsigned int sleep(unsigned int seconds);')
   local print_mutex = threads.Mutex(print_mutex_id)
   for i = 1, 30 do
      local value = 30000 + __threadid * 1000 + i
      local status = vector:pushFront(value)
      if status == true then
         print_mutex:lock()
         print('sync_pushfront', __threadid, i, value)
         print_mutex:unlock()
      else
         print_mutex:lock()
         print('sync_pushfront', __threadid, i, value, 'blocked')
         print_mutex:unlock()
      end
      ffi.C.sleep(2)
   end
end
end

-- Build a thread job that synchronously pops 30 values from the front
-- of the shared vector, logging each result under the print mutex.
function joe:syncPopFrontJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local value, status = vector:popFront()
         if status == true then
            print_mutex:lock()
            print('sync_popfront', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_popfront', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that asynchronously pushes 10 thread-unique values
-- (40000 base) to the front; a false status is logged as 'blocked'.
function joe:asyncPushFrontJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 10 do
         local value = 40000 + __threadid * 1000 + i
         local status = vector:pushFrontAsync(value)
         if status == true then
            print_mutex:lock()
            print('async_pushfront', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_pushfront', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(6)
      end
   end
end

-- Build a thread job that asynchronously pops 30 values from the front.
function joe:asyncPopFrontJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local value, status = vector:popFrontAsync()
         if status == true then
            print_mutex:lock()
            print('async_popfront', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_popfront', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(2)
      end
   end
end

-- Build a thread job that synchronously pushes 30 thread-unique values
-- (50000 base) to the back of the shared vector.
function joe:syncPushBackJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local value = 50000 + __threadid * 1000 + i
         local status = vector:pushBack(value)
         if status == true then
            print_mutex:lock()
            print('sync_pushback', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_pushback', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(2)
      end
   end
end

-- Build a thread job that synchronously pops 30 values from the back.
function joe:syncPopBackJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local value, status = vector:popBack()
         if status == true then
            print_mutex:lock()
            print('sync_popback', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_popback', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that asynchronously pushes 10 values to the back.
-- NOTE(review): the 40000 base matches asyncPushFrontJob rather than the
-- 50000 base of syncPushBackJob -- presumably intentional for telling
-- async pushes apart in the log; confirm against the test's expectations.
function joe:asyncPushBackJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 10 do
         local value = 40000 + __threadid * 1000 + i
         local status = vector:pushBackAsync(value)
         if status == true then
            print_mutex:lock()
            print('async_pushback', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_pushback', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(6)
      end
   end
end

-- Build a thread job that asynchronously pops 20 values from the back.
function joe:asyncPopBackJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 20 do
         local value, status = vector:popBackAsync()
         if status == true then
            print_mutex:lock()
            print('async_popback', __threadid, i, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_popback', __threadid, i, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that reads 30 random slots via the synchronous
-- index operator (vector[index]).
function joe:syncGetJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local index = math.random(10)
         local value = vector[index]
         -- Indexing returns no status; status is hard-coded true, so the
         -- 'blocked' branch below is unreachable (kept to mirror the
         -- shape of the other jobs).
         local status = true
         if status == true then
            print_mutex:lock()
            print('sync_get', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_get', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(2)
      end
   end
end

-- Build a thread job that writes 60 random slots via the synchronous
-- index assignment (vector[index] = value).
function joe:syncSetJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 60 do
         local index = math.random(10)
         local value = 50000 + __threadid * 1000 + i
         vector[index] = value
         -- Index assignment returns no status; hard-coded true, so the
         -- 'blocked' branch below is unreachable (kept for symmetry).
         local status = true
         if status == true then
            print_mutex:lock()
            print('sync_set', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_set', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(1)
      end
   end
end

-- Build a thread job that reads 20 random slots asynchronously.
function joe:asyncGetJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 20 do
         local index = math.random(10)
         local value, status = vector:getAsync(index)
         if status == true then
            print_mutex:lock()
            print('async_get', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_get', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that writes 60 random slots asynchronously
-- (60000 base); setAsync also reports the value that was replaced.
function joe:asyncSetJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 60 do
         local index = math.random(10)
         local value = 60000 + __threadid * 1000 + i
         local status, old_value = vector:setAsync(index, value)
         if status == true then
            print_mutex:lock()
            print('async_set', __threadid, i, index, value, old_value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print(
               'async_set', __threadid, i, index, value, old_value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(1)
      end
   end
end

-- Build a thread job that synchronously sorts the vector 20 times,
-- ordering nil holes first.
-- BUGFIX: the comparator previously returned true for (nil, nil), which
-- violates the strict order table.sort requires (comp(x, x) must be
-- false) and can raise "invalid order function" when two holes meet.
function joe:syncSortJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 20 do
         local status = vector:sort(
            function (a, b)
               if a == nil then
                  -- nil sorts before non-nil; (nil, nil) must be false.
                  return b ~= nil
               elseif b == nil then
                  return false
               end
               return a < b
            end)
         if status == true then
            print_mutex:lock()
            print('sync_sort', __threadid)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_sort', __threadid, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that asynchronously sorts the vector 20 times,
-- ordering nil holes first.
-- BUGFIX: same strict-order comparator fix as syncSortJob -- (nil, nil)
-- must compare false.
function joe:asyncSortJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 20 do
         local status = vector:sortAsync(
            function (a, b)
               if a == nil then
                  -- nil sorts before non-nil; (nil, nil) must be false.
                  return b ~= nil
               elseif b == nil then
                  return false
               end
               return a < b
            end)
         if status == true then
            print_mutex:lock()
            print('async_sort', __threadid)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_sort', __threadid, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(3)
      end
   end
end

-- Build a thread job that performs 30 synchronous read-callback
-- accesses on random slots; the callback passes the value through.
function joe:syncReadJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local index = math.random(10)
         local status, value = vector:read(
            index, function (value) return true, value end)
         if status == true then
            print_mutex:lock()
            print('sync_read', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_read', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(2)
      end
   end
end

-- Build a thread job that performs 60 synchronous write-callback
-- accesses on random slots (50000 base).
function joe:syncWriteJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 60 do
         local index = math.random(10)
         local value = 50000 + __threadid * 1000 + i
         -- The callback closes over the OUTER `value` (locals enter
         -- scope after the full statement), then `value` is shadowed by
         -- write()'s second result.
         local status, value = vector:write(
            index, function (old_value) return value end)
         if status == true then
            print_mutex:lock()
            print('sync_write', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('sync_write', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(1)
      end
   end
end

-- Build a thread job that performs 30 asynchronous read-callback
-- accesses on random slots.
function joe:asyncReadJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 30 do
         local index = math.random(10)
         local status, value = vector:readAsync(
            index, function (value) return true, value end)
         if status == true then
            print_mutex:lock()
            print('async_read', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_read', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(2)
      end
   end
end

-- Build a thread job that performs 60 asynchronous write-callback
-- accesses on random slots (50000 base).
function joe:asyncWriteJob()
   local print_mutex_id = self.print_mutex:id()
   local vector = self.vector
   return function()
      local ffi = require('ffi')
      local math = require('math')
      local os = require('os')
      local threads = require('threads')

      math.randomseed(os.time() + __threadid)
      ffi.cdef('unsigned int sleep(unsigned int seconds);')
      local print_mutex = threads.Mutex(print_mutex_id)
      for i = 1, 60 do
         local index = math.random(10)
         local value = 50000 + __threadid * 1000 + i
         -- Same shadowing pattern as syncWriteJob: callback captures the
         -- outer `value`, then `value` is rebound to writeAsync's result.
         local status, value = vector:writeAsync(
            index, function (old_value) return value end)
         if status == true then
            print_mutex:lock()
            print('async_write', __threadid, i, index, value)
            print_mutex:unlock()
         else
            print_mutex:lock()
            print('async_write', __threadid, i, index, value, 'blocked')
            print_mutex:unlock()
         end
         ffi.C.sleep(1)
      end
   end
end

-- Run the test driver when this file is loaded, then expose the table.
joe.main()
return joe