├── repopulateGrad.lua
├── penlightTest.lua
├── LICENSE
├── netLighter.lua
├── netConverter.lua
└── README.md

/repopulateGrad.lua:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
-- Recursive routine that restores a saved net for further training
--------------------------------------------------------------------------------

-- Rebuild the gradient tensors that netLighter's saveNet() pruned away.
-- Each gradient is recreated with the same geometry as its parameter and
-- zeroed: a bare clone() would leave a copy of the *weights* inside
-- gradWeight/gradBias, i.e. garbage accumulator values, whereas a resumed
-- training session expects clean (zero) gradient accumulators.
function craftWeight(module)
   if module.weight then module.gradWeight = module.weight:clone():zero() end
   if module.bias   then module.gradBias   = module.bias:clone():zero()   end
   module.gradInput = torch.Tensor()
end

-- Walk the (possibly nested) container hierarchy and repopulate the
-- gradients of every module found along the way.
function repopulateGrad(network)
   craftWeight(network)
   if network.modules then
      for _, m in ipairs(network.modules) do
         repopulateGrad(m)
      end
   end
end
--------------------------------------------------------------------------------
/penlightTest.lua:
--------------------------------------------------------------------------------
-- Parsing the command line ----------------------------------------------------
lapp = require 'pl.lapp'
lapp.slack = true  -- allow multi-letter short options such as -sk, -f1

-- NOTE: -h is deliberately overridden to mean "height", so --help must be
-- reached by running the script with no arguments (lapp prints the usage).
args = lapp [[
Does some calculations
-v, --video (string) Specify input video
-w, --width (default 256) Width of the video
-h, --height (default 144) Height of the video
-t, --time (default 10) Seconds of video to process
-sk,--seek (default 0) Seek number of seconds
-f1,--flag1 A false flag
-f2,--flag2 A false flag
-f3,--flag3 (default true) A true flag
-f4,--flag4 (default true) A true flag
]]

-- Plotting the parsed commands ------------------------------------------------
print 'args ='
print(args)

pretty = require 'pl.pretty'
pretty.dump(args)
--------------------------------------------------------------------------------
/LICENSE:
-------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2013 Alfredo Canziani 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------
/netLighter.lua:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
-- Recursive routine that removes unnecessary data from a network
--------------------------------------------------------------------------------

-- opt is the global option table; opt.save stores the default saving location
opt = opt or {}
opt.save = opt.save or './'

print '==> generating recursive network cleaning routine'
-- Strip one module of everything not needed at inference time: gradients are
-- dropped entirely, temporary buffers (finput, output, indices) are replaced
-- by empty tensors so the fields still exist but occupy no space on disk.
-- Use repopulateGrad() to rebuild the gradients before resuming training.
function nilling(module)
   module.gradBias = nil
   if module.finput then module.finput = torch.Tensor() end
   module.gradWeight = nil
   module.output = torch.Tensor()
   module.fgradInput = nil
   module.gradInput = nil
   if module.indices then module.indices = torch.Tensor() end
end

-- Recursively clean every module of a (possibly nested) container.
function netLighter(network)
   nilling(network)
   if network.modules then
      for _, m in ipairs(network.modules) do
         netLighter(m)
      end
   end
end

print '==> generating network saving routine'
-- Save a pruned float copy of `model` under opt.save/name.
-- The caller's model is left untouched (model:float() returns a copy when
-- the model lives on the GPU -- TODO confirm for already-float models).
function saveNet(name, model)
   local filename = paths.concat(opt.save, name)
   -- Quote the path so directories containing spaces or shell
   -- metacharacters do not break (or subvert) the mkdir call
   os.execute("mkdir -p '" .. sys.dirname(filename) .. "'")
   print('==> saving model to '..filename)
   local modelToSave = model:float()
   netLighter(modelToSave)
   torch.save(filename, modelToSave)
end
--------------------------------------------------------------------------------
/netConverter.lua:
--------------------------------------------------------------------------------
#!/usr/bin/env th
--------------------------------------------------------------------------------
-- Conversion script: it turns a CUDA network into a CPU one
--------------------------------------------------------------------------------
--
-- Iteratively scanning the network, it disregards the transposition modules
-- and converts <nn.SpatialConvolutionCUDA> into <nn.SpatialConvolution> and
-- <nn.SpatialMaxPoolingCUDA> into <nn.SpatialMaxPooling>. All other modules
-- are copied
--
--------------------------------------------------------------------------------

require 'cunn'
require 'nnx'

--------------------------------------------------------------------------------

if not arg[1] then print "Network unspecified (type it after the program's name)" return
else print('Loading: ' .. arg[1]) end

cudaNet = torch.load(arg[1])
cudaCNN = cudaNet.modules[1]       -- assumes modules[1] is the feature extractor
classifier = cudaNet.modules[2]    -- and modules[2] the classifier; verify for other nets

torch.setdefaulttensortype('torch.FloatTensor')

-- Convert one CUDA module and append its CPU equivalent to floatNetwork.
-- CUDA convolutions store their weights in a transposed layout, so the weight
-- tensor is permuted back before being assigned to the float convolution.
function smartCopy(cudaModule, floatNetwork)
   -- if cudaModule.__typename == 'nn.Sequential' then
   --    floatNetwork = nn.Sequential()
   if cudaModule.__typename == 'nn.SpatialConvolutionCUDA' then
      print(' + Converting <nn.SpatialConvolutionCUDA> into <nn.SpatialConvolution>')
      floatNetwork:add(nn.SpatialConvolution(cudaModule.nInputPlane, cudaModule.nOutputPlane, cudaModule.kW, cudaModule.kH))
      local conv = floatNetwork.modules[#floatNetwork.modules]
      -- drop the gradients: the converted net is meant for inference
      conv.gradBias = nil
      conv.gradWeight = nil
      conv.gradInput = nil
      conv.weight = cudaModule.weight:transpose(4,3):transpose(3,2):transpose(2,1):float()
      conv.bias = cudaModule.bias:float()
   elseif cudaModule.__typename == 'nn.SpatialMaxPoolingCUDA' then
      print(' + Converting <nn.SpatialMaxPoolingCUDA> into <nn.SpatialMaxPooling>')
      floatNetwork:add(nn.SpatialMaxPooling(cudaModule.kW, cudaModule.kH, cudaModule.dW, cudaModule.dH))
      --floatNetwork.modules[#floatNetwork.modules].indices = nil
      floatNetwork.modules[#floatNetwork.modules].gradInput = nil
   elseif cudaModule.__typename ~= 'nn.Transpose' then
      -- Transpose modules exist only to feed the CUDA layout; skip them.
      print(' + Copying <' .. cudaModule.__typename .. '>')
      floatNetwork:add(cudaModule:float())
   end
end

-- Build a float nn.Sequential mirroring cudaNetwork, module by module.
function convert(cudaNetwork)
   local floatNetwork = nn.Sequential()
   if cudaNetwork.modules then
      for _, m in ipairs(cudaNetwork.modules) do
         smartCopy(m, floatNetwork)
      end
   end
   return floatNetwork
end

print 'Converting <CNN> to <float> ...'
floatCNN = convert(cudaCNN)
-- print('cuda CNN', cudaCNN)
-- print('float CNN', floatCNN)

print 'Assembling final float network'
floatNet = nn.Sequential()
print ' + Adding <CNN>'
floatNet:add(floatCNN)
print ' + Adding <classifier>'
floatNet:add(classifier:float())

--[[io.write("Type network's new name without extension: ")
newFileName = io.read() .. '.net']]
-- Replace the trailing '.net' (4 characters) with '-float.net'
newFileName = string.sub(arg[1], 1, -5) .. '-float.net'
print('Saving network as <' .. newFileName .. '>')
torch.save(newFileName, floatNet)

io.write 'Would you like to print the saved network on screen [y/(n)]? '
if io.read() == 'y' then print(floatNet) end
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Torch7-tools

This would be a collection of useful routines and functions currently missing in [`Torch7`](https://github.com/torch/torch7).
Most of the time the scripts are just useful routines or functions I am using for my job and I am allowed to extract and publish here.

## Tools description

### Table of contents
- [Network lightener](#network-lightener)
- [Gradient repopulation](#gradient-repopulation)
- [Command line parser](#command-line-parser)
- [Cuda to Float network converter](#cuda-to-float-network-converter)

### Important!
The new [`net-toolkit`](https://github.com/Atcold/net-toolkit/tree/master) Torch7 package will be released soon. Basically it will provide a way for easily handling networks and it will include the following two functions, which are to be considered therefore deprecated.
> ### Network lightener
> `netLighter` library provides a `saveNet(name, network)` function which saves a lighter version of your current network, removing all unnecessary data from it (such as *gradients*, *temporary data*, etc.). The default location is `./`; if a different one is preferred instead, you may want to specify it with a global option `opt.save = 'my-path/'`. Usage:

> ```lua
> -- if './' is not ok:
> opt = opt or {}
> opt.save = 'my-path/'
> -- otherwise, it will be sufficient only:
> require 'netLighter'
> -- net = nn.Sequential() and other stuff
> saveNet('myNet.net', net)
> ```

> ### Gradient repopulation
> Let's say we would like to load a network we have previously saved with `saveNet()` for continuing a training session on it. Some inner parameters (the *gradients*) have to be restored, since `saveNet()` did a pruning operation on the network in order to save space. Here is how we can handle this case:

> ```lua
> local networkFile = '/path/To/MyNet.net'
> model = torch.load(networkFile)
> repopulateGrad(model)
> ```

> Now we can keep training, perhaps without forgetting to define a *criterion* `loss` (the criterion is not saved with the network, so we have to re-define it, if we don't already do it somewhere else in the code).

### Command line parser
`penlightTest` shows a great deal of command line parser options that could turn out helpful when we need to send some initial configuration values to the script in a compact manner. Running the script in `lua` (or `torch`) with no argument will print the help screen (usually reachable with the option `--help` or `-h`, which in **this** case has been deliberately overwritten to be the `height` handle).
```bash
lua penlightTest.lua
```
Moreover, you can notice the presence of `default false` and `default true` flags and short *multi-letter* handles as well, which are not interpreted as in the Unix environment (say like `ls -al`) because `slack` of `lapp` has been (intentionally) set to `true`.
You can run the test like this (it will show everything it can do that I care about):
```bash
lua penlightTest.lua -v abc --time 40 -h 20 -sk 15 --flag1 -f3
```
The same two lines above could have been written by substituting `lua` with `torch` (and this is what I usually do). Further documentation on the `lapp` command line parser can be found [here](https://github.com/stevedonovan/Penlight/blob/master/doc/manual/08-additional.md#command-line-programs-with-lapp).

### Cuda to Float network converter
`netConverter` converts a `cuda` network into an equivalent `float` one.
```bash
./netConverter.lua ../results/multinet-cuda.net
```
The script automatically changes the extension to `-float.net` and saves the new network in the same location as the input one (which is specified as an argument of the routine, as you can see from the example above).
--------------------------------------------------------------------------------