├── datasources
│   ├── datasource.lua
│   ├── get_datasource.py
│   ├── classdatasource.lua
│   ├── thread.lua
│   ├── augment.lua
│   └── ucf101.lua
├── expand.lua
├── upsample.lua
├── README.txt
├── image_error_measures.lua
├── test-frame-prediction-on-ucf-rec_gdl.lua
├── get_model.lua
├── train_iclr_model.lua
└── LICENSE
/datasources/datasource.lua:
--------------------------------------------------------------------------------
1 | require 'torch'
2 |
3 | require 'datasources.classdatasource'
--------------------------------------------------------------------------------
/expand.lua:
--------------------------------------------------------------------------------
1 | require 'nn'
2 |
-- nn.ExpandDim: expands dimension d of the input (which must have size 1)
-- to size k. The forward pass is a zero-copy view (stride-0 expand); the
-- backward pass sums the gradient over the k replicas.
local ExpandDim, parent = torch.class('nn.ExpandDim', 'nn.Module')

-- expand dim d (must be 1 in the input) k times
function ExpandDim:__init(d, k)
   -- BUGFIX: was parent:__init(self), which is parent.__init(parent, self):
   -- it initialized the nn.Module class table itself instead of this instance
   parent.__init(self)
   self.d = d
   self.k = k
   self.gradInput = torch.Tensor()
end

function ExpandDim:updateOutput(input)
   assert(input:size(self.d) == 1)
   local dims = input:size():totable()
   dims[self.d] = self.k
   -- expand() returns a view sharing storage with input (no allocation)
   self.output = input:expand(unpack(dims))
   return self.output
end

function ExpandDim:updateGradInput(input, gradOutput)
   -- the gradient wrt the size-1 dim is the sum over the k expanded copies
   self.gradInput:resizeAs(input)
   self.gradInput:sum(gradOutput, self.d)
   return self.gradInput
end
29 |
--------------------------------------------------------------------------------
/datasources/get_datasource.py:
--------------------------------------------------------------------------------
1 | #!/usr/local/bin
2 | import os
3 |
4 | datafolder = 'ucf101'
5 |
6 | os.system("""
7 | cd %s
8 | wget http://crcv.ucf.edu/data/UCF101/UCF101.rar
9 | unrar e UCF101.rar
10 | wget http://crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip
11 | unzip UCF101TrainTestSplits-RecognitionTask.zip
12 | mv ucfTrainTestlist/* .
13 | rmdir ucfTrainTestlist
14 | """%(datafolder))
15 |
16 | files = os.listdir(datafolder)
17 | classes = set()
18 | for f in files:
19 | if f.find('.avi') != -1:
20 | cl = f[:f.find('_g')]
21 | assert(cl[:2] == 'v_')
22 | cl = cl[2:]
23 | classes.add(cl)
24 |
25 | for cl in classes:
26 | os.mkdir('%s/%s'%(datafolder, cl))
27 |
28 | for f in files:
29 | if f.find('.avi') != -1:
30 | cl = f[:f.find('_g')]
31 | assert(cl[:2] == 'v_')
32 | cl = cl[2:]
33 | os.system('mv %s/%s %s/%s'%(datafolder, f, datafolder, cl))
34 |
35 | # The following line fixes a bug in the dataset.
36 | os.system('mv %s/HandStandPushups %s/HandstandPushups'%(datafolder, datafolder))
37 |
--------------------------------------------------------------------------------
/upsample.lua:
--------------------------------------------------------------------------------
1 | require 'nn'
2 | require 'math'
3 |
-- nn.SpatialUpSample: bilinear spatial upsampling for square inputs.
-- An interpolation matrix M (H x h) is precomputed in __init and applied
-- separably (along width, then height) via matrix products.
local SpatialUpSample, parent = torch.class('nn.SpatialUpSample', 'nn.Module')

-- for now, assume square input
-- inputH: input height (== width), outputH: output height (== width)
function SpatialUpSample:__init(inputH, outputH)
   parent.__init(self)
   self.h = inputH
   self.H = outputH
   -- M[x][k] = bilinear weight of input row k for output row x
   self.M = torch.zeros(self.H, self.h)
   local s = self.H / self.h  -- upsampling factor
   for k = 1, self.h do
      for x = 1, self.H do
         -- triangular (hat) kernel, shifted so input/output pixel centers align
         local v = math.max(0, 1 - math.abs((x-1) / s - (s-1)/(2*s) - k + 1))
         self.M[x][k] = v
      end
   end
   -- fix the first and last lines: border rows of M don't sum to 1,
   -- so renormalize every row to sum to 1
   self.M:cdiv(self.M:sum(2):expandAs(self.M))
   self.output = torch.Tensor()
   self.tmp = torch.Tensor()       -- scratch buffer (first pass / results)
   self.tmp2 = torch.Tensor()      -- scratch buffer (transposed copies)
   self.gradInput = torch.Tensor()
end

-- Input: (batch, features, h, h). Output: (batch, features, H, H).
-- Upsamples width with M:t(), transposes, upsamples the other axis, transposes back.
function SpatialUpSample:updateOutput(input)
   assert(input:dim() == 4)
   local bsz, nfeature = input:size(1), input:size(2)
   local h, w = input:size(3), input:size(4)
   assert(h == self.h)
   assert(w == self.h)
   -- pass 1: upsample along width: (bsz*nfeature*h, w) x M^T -> (.., H)
   self.tmp:resize(bsz*nfeature*h, self.H)
   self.tmp:mm(input:view(bsz*nfeature*h, w), self.M:t())
   self.tmp = self.tmp:view(bsz*nfeature, h, self.H)
   -- transpose so the remaining (height) axis becomes the matmul axis
   self.tmp2:resize(bsz*nfeature*self.H, h)
   self.tmp2:copy(self.tmp:transpose(2, 3))
   -- pass 2: upsample along the former height axis
   self.tmp:resize(bsz*nfeature*self.H, self.H)
   self.tmp:mm(self.tmp2, self.M:t())
   -- transpose back to (bsz, nfeature, H, H)
   self.output:resize(bsz, nfeature, self.H, self.H)
   self.output:copy(self.tmp:view(-1, self.H, self.H):transpose(2, 3))
   return self.output
end

-- Backward pass: the exact transpose of updateOutput (multiplies by M
-- instead of M:t(), with the same transpose/copy dance in reverse).
function SpatialUpSample:updateGradInput(input, gradOutput)
   local bsz, nfeature = input:size(1), input:size(2)
   local h, w = input:size(3), input:size(4)
   self.tmp:resize(bsz*nfeature*self.H, self.H)
   self.tmp:copy(gradOutput:view(-1, self.H, self.H):transpose(2, 3))
   self.tmp2:resize(bsz*nfeature*self.H, h)
   self.tmp2:mm(self.tmp, self.M)
   self.tmp2 = self.tmp2:view(bsz*nfeature, self.H, h)
   self.tmp:resize(bsz*nfeature*h, self.H)
   self.tmp:copy(self.tmp2:transpose(2,3))
   self.gradInput:resize(bsz*nfeature*h, w)
   self.gradInput:mm(self.tmp, self.M)
   self.gradInput = self.gradInput:view(bsz, nfeature, h, w)
   return self.gradInput
end
60 |
--------------------------------------------------------------------------------
/README.txt:
--------------------------------------------------------------------------------
1 | July 2016
2 | Authors: Michael Mathieu, Camille Couprie
3 |
4 | Update: due to large files that could not be stored on github, the trained models and dataset may be found at:
5 | http://perso.esiee.fr/~coupriec/MathieuICLR16TestCode.zip
6 |
7 | This repository contains:
8 |
9 | - Test code for the ICLR 2016 paper:
10 | [1] Michael Mathieu, Camille Couprie, Yann LeCun:
11 | "Deep multi-scale video prediction beyond mean square error".
12 | http://arxiv.org/abs/1511.05440
13 | http://cs.nyu.edu/~mathieu/iclr2016.html
14 |
15 | - Two trained models (using adversarial+l2norm training or
16 | adversarial+l1norm+gdl training).
17 |
18 | - A subset of the UCF101 test dataset [2] with optical flow results to perform
19 | an evaluation in moving area as described in [1].
20 |
21 | - A training script for the model. Because the Sports1m dataset is hard to get,
22 | we cannot provide an easy script to train on it. Instead, we propose a script
23 | to train on UCF101, which is significantly smaller.
24 |
25 | Main files:
26 | - For testing: test-frame-prediction-on-ucf-rec_gdl.lua
27 | Script to test 2 trained models to predict future frames in video from 4
28 | previous ones on a subset of the UCF101 test dataset.
29 |
30 | - For training: train_iclr_model.lua
31 | Script to train a model from scratch on the UCF101 dataset. If you want to
32 | train on the Sports1m dataset, you will need to download it and write a
33 | datareader, similar to datasources/ucf101.lua .
34 |
35 | Usage:
36 |
37 | 1- Install torch and the packages (standard packages + nngraph, cudnn.torch, gfx.js)
38 |
39 | For testing:
40 | 2- Uncompress the provided archives.
41 | 3- Run the main script :
42 | th test-frame-prediction-on-ucf-rec_gdl.lua
43 |
44 | It generates results (2 predicted images + animated gifs)
45 | in a directory named 'AdvGDL'.
46 | It also displays the average PSNR and SSIM of the first 2 predicted frames
47 | following the evaluation presented in [1].
48 |
49 | For training:
50 | 2- Get the UCF101 dataset (requires unrar, modify the script if you have another .rar extractor):
51 | cd datasources
52 | python get_datasource.py
53 | 3- Get THFFmpeg from https://github.com/MichaelMathieu/THFFmpeg
54 | 4- Run the training script:
55 | th train_iclr_model.lua
56 | 5- For visualizing the intermediate results, start the gfx.js server
57 | th -lgfx.start
58 | And go to http://localhost:8000 in your internet browser.
59 |
60 | [2]:Khurram Soomro, Amir Roshan Zamir and Mubarak Shah,
61 | UCF101: A Dataset of 101 Human Action Classes From Videos in The Wild.,
62 | CRCV-TR-12-01, November, 2012.
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/datasources/classdatasource.lua:
--------------------------------------------------------------------------------
-- Base class for datasources that produce (batch, labels) pairs.
-- Provides CPU/GPU buffer management plus a couple of preprocessing helpers.
local ClassDatasource = torch.class('ClassDatasource')

function ClassDatasource:__init()
   -- remember the default tensor type so typeResults knows where results go
   self.tensortype = torch.getdefaulttensortype()
   self.output_cpu, self.labels_cpu = torch.Tensor(), torch.LongTensor()
end

-- Standardize all sets to zero mean / unit std (per channel for 4d data),
-- using statistics computed on trainset.
-- unused, TODO move
function ClassDatasource:center(trainset, sets)
   error("shouldn't be used for now")
   -- NOTE: everything below is currently unreachable (kept for reference)
   if trainset:dim() == 3 then
      local mu, sigma = trainset:mean(), trainset:std()
      for _, subset in pairs(sets) do
         subset:add(-mu):div(sigma)
      end
   else
      assert(trainset:dim() == 4)
      for ch = 1, trainset:size(2) do
         local mu = trainset[{{},ch}]:mean()
         local sigma = trainset[{{},ch}]:std()
         for _, subset in pairs(sets) do
            subset[{{},ch}]:add(-mu):div(sigma)
         end
      end
   end
end

-- Linearly rescale every set in `sets` (in place) to [-1, 1], using the
-- min/max of trainset. Unless `fullset` is true, only the first 100
-- samples are scanned for the range (cheaper on large sets).
function ClassDatasource:normalize(trainset, sets, fullset)
   local function range_of(data)
      if fullset or data:size(1) < 100 then
         return data:min(), data:max()
      end
      data = data[{{1,100}}]--:contiguous()
      return data:min(), data:max()
   end
   -- scales the data between -1 and 1
   if trainset:dim() == 3 then
      -- grayscale: one global range
      local lo, hi = range_of(trainset)
      for _, subset in pairs(sets) do
         subset:add(-lo):mul(2/(hi-lo)):add(-1)
      end
   else
      -- rgb (or multichannel): one range per channel
      assert(trainset:dim() == 4)
      for ch = 1, trainset:size(2) do
         local lo, hi = range_of(trainset[{{},ch}])
         for _, subset in pairs(sets) do
            subset[{{},ch}]:add(-lo):mul(2/(hi-lo)):add(-1)
         end
      end
   end
end

-- Copy (output, labels) into the GPU buffers when this datasource was
-- switched to CUDA; otherwise pass them through unchanged.
function ClassDatasource:typeResults(output, labels)
   if self.tensortype ~= 'torch.CudaTensor' then
      return output, labels
   end
   self.output_gpu:resize(output:size()):copy(output)
   self.labels_gpu:resize(labels:size()):copy(labels)
   return self.output_gpu, self.labels_gpu
end

-- Switch the datasource output type. 'torch.CudaTensor' allocates GPU
-- buffers; any other type converts the CPU output buffer and frees the GPU ones.
function ClassDatasource:type(typ)
   self.tensortype = typ
   if typ == 'torch.CudaTensor' then
      self.output_gpu = torch.CudaTensor()
      self.labels_gpu = torch.CudaTensor()
   else
      self.output_cpu = self.output_cpu:type(typ)
      self.output_gpu, self.labels_gpu = nil, nil
      collectgarbage()
   end
end

function ClassDatasource:cuda()
   self:type('torch.CudaTensor')
end

function ClassDatasource:float()
   self:type('torch.FloatTensor')
end

function ClassDatasource:double()
   self:type('torch.DoubleTensor')
end
--------------------------------------------------------------------------------
/datasources/thread.lua:
--------------------------------------------------------------------------------
--[[
ThreadedDatasource wraps another datasource and prefetches batches in
background worker threads ("donkeys").
Note that it costs time to switch from set (train/test/valid)
and change the batch size. If you intend to do it a lot, create
multiple instances of datasources, with constant set/batchSize
params:
   nDonkeys [4]
--]]

require 'datasources.datasource'
local threads = require 'threads'

local ThreadedDatasource, parent = torch.class('ThreadedDatasource', 'ClassDatasource')

-- getDatasourceFun: zero-arg function returning a new datasource; it is run
--   once per worker thread, so each thread owns a private datasource.
-- params.nDonkeys: number of worker threads [default 4]
function ThreadedDatasource:__init(getDatasourceFun, params)
   parent.__init(self)
   self.nDonkeys = params.nDonkeys or 4
   --threads.Threads.serialization('threads.sharedserialize') --TODO
   self.donkeys = threads.Threads(self.nDonkeys,
      function(threadid)
         require 'torch'
         require 'math'
         require 'os'
         -- per-thread seeds so workers don't all produce identical batches
         torch.manualSeed(threadid*os.clock())
         math.randomseed(threadid*os.clock()*1.7)
         torch.setnumthreads(1)
         -- globals living in the worker thread's Lua state
         threadid_t = threadid
         datasource_t = getDatasourceFun()
      end)
   -- fetch the dataset geometry from one of the workers
   self.donkeys:addjob(
      function()
         return datasource_t.nChannels, datasource_t.nClasses, datasource_t.h, datasource_t.w
      end,
      function(nChannels, nClasses, h, w)
         self.nChannels, self.nClasses = nChannels, nClasses
         self.h, self.w = h, w
      end)
   self.donkeys:synchronize()
   self.started = false
   self.output, self.labels = self.output_cpu, self.labels_cpu

   -- TODO? does that overrides the parent __gc?:
   if newproxy then
      --lua <= 5.1 (userdata proxy is the only way to get a __gc hook)
      self.__gc__ = newproxy(true)
      getmetatable(self.__gc__).__gc =
         function() self.output = nil end
   else
      self.__gc = function() self.output = nil end
   end
end

-- Point self.output/self.labels at the CPU or GPU buffer pair allocated
-- by the parent's type().
function ThreadedDatasource:type(typ)
   parent.type(self, typ)
   if typ == 'torch.CudaTensor' then
      self.output, self.labels = self.output_gpu, self.labels_gpu
   else
      self.output, self.labels = self.output_cpu, self.labels_cpu
   end
end

-- Returns the next prefetched batch. Keeps the worker queue full and
-- discards completed batches until one matching (batchSize, set) arrives --
-- this is why switching set/batchSize frequently is expensive.
function ThreadedDatasource:nextBatch(batchSize, set)
   assert(batchSize ~= nil, 'nextBatch: must specify batchSize')
   assert(set ~= nil, 'nextBatch: must specify set')
   local function addjob()
      self.donkeys:addjob(
         function()
            collectgarbage()
            local batch, labels = datasource_t:nextBatch(batchSize, set)
            return batch, labels
         end,
         function(outputs, labels)
            -- runs on the main thread: copy the worker's batch into our buffers
            if self.output ~= nil then
               self.output:resize(outputs:size()):copy(outputs)
               self.labels:resize(labels:size()):copy(labels)
               self.last_config = {batchSize, set}
            end
         end)
   end
   if not self.started then
      -- first call: fill the pipeline with one job per worker
      self.donkeys:synchronize()
      self.donkeys:specific(false)
      for i = 1, self.nDonkeys do
         if self.donkeys:acceptsjob() then
            addjob()
         end
      end
      self.started = true
   end

   if self.donkeys:haserror() then
      print("ThreadedDatasource: There is an error in a donkey")
      self.donkeys:terminate()
      os.exit(0)
   end

   -- consume finished jobs until one with the requested config lands
   self.last_config = {}
   while (self.last_config[1] ~= batchSize) or (self.last_config[2] ~= set) do
      addjob()
      self.donkeys:dojob()
   end
   return self.output, self.labels
end

-- Ordered iterator over the set, prefetching one batch ahead on a single
-- pinned worker. The returned closure yields (output, labels), or nothing
-- once the underlying iterator is exhausted.
function ThreadedDatasource:orderedIterator(batchSize, set)
   -- this one doesn't parallelize on more than one thread
   -- (this might be a TODO but seems hard)
   assert(batchSize ~= nil, 'nextBatch: must specify batchSize')
   assert(set ~= nil, 'nextBatch: must specify set')
   self.donkeys:synchronize()
   self.donkeys:specific(true)  -- pin all following jobs to worker 1
   self.started = false         -- force nextBatch to refill its pipeline later
   self.donkeys:addjob(
      1, function()
         collectgarbage()
         it_t = datasource_t:orderedIterator(batchSize, set)
      end)
   local finished = false
   local function addjob()
      self.donkeys:addjob(
         1,
         function()
            return it_t()
         end,
         function(output, labels)
            if output == nil then
               finished = true
            else
               if self.output ~= nil then --TODO: why is the line useful?
                  self.output:resize(output:size()):copy(output)
                  self.labels:resize(labels:size()):copy(labels)
               end
            end
         end)
   end
   -- NOTE(review): the very first call returns self.output before any fetch
   -- job has filled it (addjob is only scheduled after the synchronize) --
   -- looks like an off-by-one; confirm against callers before changing
   return function()
      self.donkeys:synchronize()
      if finished then
         -- iterator exhausted: release the worker-side iterator and stop
         self.donkeys:addjob(1, function() it_t = nil collectgarbage() end)
         self.donkeys:synchronize()
      else
         addjob()
         return self.output, self.labels
      end
   end
end
--------------------------------------------------------------------------------
/image_error_measures.lua:
--------------------------------------------------------------------------------
-- Error measures between images (PSNR, SSIM, gradient-sharpness difference).
-- This file is loaded as a chunk via loadfile(...)(iscuda): the vararg
-- selects whether the gradient modules run on CUDA.
local iscuda=...

-- useful to fast image gradient computation:
-- dy/dx shift the image by one pixel (zero-padded), so
-- (dx:forward(img) - img) approximates a horizontal finite difference.
-- NOTE: deliberately global -- computel1difference below uses them.
dy = nn.Sequential()
dy:add(nn.SpatialZeroPadding(0,0,1, -1))


dx = nn.Sequential()
dx:add(nn.SpatialZeroPadding(1, -1, 0, 0))

if iscuda==true then
   dy:cuda()
   dx:cuda()
end
17 |
--------------------------------------------------------------------------------
-- PSNR (in dB) between two images assumed to lie in [-1, 1].
-- The squared error is rescaled by 128^2 to approximate the 0..255 range.
function PSNR(true_frame, pred)

   local eps = 0.0001

   -- accumulate the per-pixel squared error over all channels
   local sq_err = 0
   for i = 1, pred:size(2) do
      for j = 1, pred:size(3) do
         for c = 1, pred:size(1) do
            -- put image from -1 to 1 to 0 and 255
            local d = pred[c][i][j] - true_frame[c][i][j]
            sq_err = sq_err + d*d
         end
      end
   end

   -- MSE, rescaled from [-1,1] to the 0..255 range
   local mse = 128*128*sq_err/(pred:size(1)*pred:size(2)*pred:size(3))

   -- PSNR; clamp near-zero errors to eps to avoid dividing by ~0
   if mse <= eps then
      mse = eps
   end
   return 10*torch.log((255*255)/mse)/torch.log(10)
end
47 |
--------------------------------------------------------------------------------
-- SSIM between two images.
-- NOTE: expects inputs in [-1, 1] and MODIFIES img1/img2 IN PLACE
-- (rescales them to [0, 255]); RGB inputs are converted to luma first.
function SSIM(img1, img2)
   --[[
   %This is an implementation of the algorithm for calculating the
   %Structural SIMilarity (SSIM) index between two images. Please refer
   %to the following paper:
   %
   %Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image
   %quality assessment: From error visibility to structural similarity"
   %IEEE Transactions on Image Processing, vol. 13, no. 4, pp.600-612,
   %Apr. 2004.
   %

   %Input : (1) img1: the first image being compared
   %        (2) img2: the second image being compared
   %        (3) K: constants in the SSIM index formula (see the above
   %            reference). default value: K = [0.01 0.03]
   %        (4) window: local window for statistics (see the above
   %            reference). default window is Gaussian given by
   %            window = fspecial('gaussian', 11, 1.5);
   %        (5) L: dynamic range of the images. default: L = 255
   %
   %Output: mssim: the mean SSIM index value between 2 images.
   %        If one of the images being compared is regarded as
   %        perfect quality, then mssim can be considered as the
   %        quality measure of the other image.
   %        If img1 = img2, then mssim = 1.]]


   -- convert RGB to luma (keep only the Y channel)
   if img1:size(1) > 2 then
      img1 = image.rgb2y(img1)
      img1 = img1[1]
      img2 = image.rgb2y(img2)
      img2 = img2[1]
   end



   -- place images between 0 and 255.
   img1:add(1):div(2):mul(255)
   img2:add(1):div(2):mul(255)

   -- standard SSIM constants (K1, K2, dynamic range L)
   local K1 = 0.01;
   local K2 = 0.03;
   local L = 255;

   local C1 = (K1*L)^2;
   local C2 = (K2*L)^2;
   -- 11x11 Gaussian window, sigma 1.5 (normalized to sum to 1 below)
   local window = image.gaussian(11, 1.5/11,0.0708);

   local window = window:div(torch.sum(window));

   -- local means
   local mu1 = image.convolve(img1, window, 'full')
   local mu2 = image.convolve(img2, window, 'full')

   local mu1_sq = torch.cmul(mu1,mu1);
   local mu2_sq = torch.cmul(mu2,mu2);
   local mu1_mu2 = torch.cmul(mu1,mu2);

   -- local variances and covariance
   local sigma1_sq = image.convolve(torch.cmul(img1,img1),window,'full')-mu1_sq
   local sigma2_sq = image.convolve(torch.cmul(img2,img2),window,'full')-mu2_sq
   local sigma12 = image.convolve(torch.cmul(img1,img2),window,'full')-mu1_mu2

   -- per-pixel SSIM map, then its mean
   local ssim_map = torch.cdiv( torch.cmul((mu1_mu2*2 + C1),(sigma12*2 + C2)) ,
      torch.cmul((mu1_sq + mu2_sq + C1),(sigma1_sq + sigma2_sq + C2)));
   local mssim = torch.mean(ssim_map);
   return mssim
end
117 |
118 |
119 |
------------------------------------------------------------------------------
-- image sharpness difference measure
-- Compares the absolute-gradient maps of prediction and ground truth with an
-- L1 difference, reported on a PSNR-like dB scale (borders excluded).
function computel1difference(img_pred, img_true )
   -- BUGFIX: s was an accidental global
   local s = img_true:size()

   if img_pred:size(1)==2 then
      img_pred = img_pred[{{1},{},{}}]
   end

   local eps = 0.0001
   -- |grad| difference in x and y; dx/dy are the file-level shift modules
   local diff_gradients = torch.abs(
      torch.abs(dx:forward(img_pred)-img_pred)[{{},{2,s[2]-1},{2,s[3]-1}}] -
      torch.abs(dx:forward(img_true)-img_true)[{{},{2,s[2]-1},{2,s[3]-1}}]) +
      torch.abs(
      torch.abs(dy:forward(img_pred)-img_pred)[{{},{2,s[2]-1},{2,s[3]-1}}] -
      torch.abs(dy:forward(img_true)-img_true)[{{},{2,s[2]-1},{2,s[3]-1}}])
   local prediction_error = torch.sum(diff_gradients)

   -- Mean, rescaled from [-1,1] to the 0..255 range
   prediction_error=128*128*prediction_error/(s[1]*s[2]*s[3])

   -- dB scale; clamp near-zero errors to eps to avoid dividing by ~0
   if prediction_error>eps then
      prediction_error = 10*torch.log((255*255)/ prediction_error)/torch.log(10)
   else
      prediction_error = 10*torch.log((255*255)/ eps)/torch.log(10)
   end

   return prediction_error
end
150 |
151 |
152 |
153 |
--------------------------------------------------------------------------------
/datasources/augment.lua:
--------------------------------------------------------------------------------
1 | require 'datasources.datasource'
2 | require 'paths'
3 | require 'image'
4 | require 'math'
5 |
-- Round to the nearest integer (ties round up).
local function round(x)
   return math.floor(0.5 + x)
end
9 |
-- AugmentDatasource wraps another datasource and applies random data
-- augmentation (flip, rotation, upscaling, cropping) to every sample.
local AugmentDatasource, parent = torch.class('AugmentDatasource', 'ClassDatasource')

-- datasource: the wrapped datasource
-- params:
--   flip [0]: 1 for vflip, 2 for hflip, 3 for both (each with prob 0.5)
--   crop [{h, w}]: output crop size (random position)
--   scaleup [1]: maximum random upscale factor (>= 1)
--   rotate [0]: max rotation angle (uniform in [-rotate, rotate])
--   cropMinimumMotion [nil]: if set, retry crops until the last two frames
--     of the cropped clip differ enough (videos only)
--   cropMinimumMotionNTries [25]: number of crop retries
function AugmentDatasource:__init(datasource, params)
   parent.__init(self)
   self.datasource = datasource
   self.nChannels, self.nClasses = datasource.nChannels, datasource.nClasses
   if params.crop then
      assert(#(params.crop) == 2)
      self.h, self.w = params.crop[1], params.crop[2]
   else
      self.h, self.w = datasource.h, datasource.w
   end

   if self.datasource.tensortype == 'torch.CudaTensor' then
      print("Warning: AugmentDatasource used with a cuda datasource. Might break")
   end

   self.params = {
      flip = params.flip or 0, --1 for vflip, 2 for hflip, 3 for both
      crop = params.crop or {self.h, self.w},
      scaleup = params.scaleup or 1,
      rotate = params.rotate or 0,
      cropMinimumMotion = params.cropMinimumMotion or nil,
      cropMinimumMotionNTries = params.cropMinimumMotionNTries or 25,
   }
end

-- View a video (frames, channels, h, w) as one (frames*channels, h, w)
-- image so the image.* functions can process all frames in a single call.
local function flatten3d(x)
   -- if x is a video, flatten it
   if x:dim() == 4 then
      return x:view(x:size(1)*x:size(2), x:size(3), x:size(4))
   else
      assert(x:dim() == 3)
      return x
   end
end

-- Returns the (y, x) dimension indices for an image (3d) or video (4d).
local function dimxy(x)
   assert((x:dim() == 3) or (x:dim() == 4))
   if x:dim() == 4 then
      return 3, 4
   else
      return 2, 3
   end
end

-- Module-level scratch buffers, shared by all instances.
-- NOTE(review): not safe if several datasources augment concurrently in the
-- same Lua state -- confirm single-threaded use per worker.
local flip_out1, flip_out2 = torch.Tensor(), torch.Tensor()
-- Randomly flip the patch: mode 1 = vertical, 2 = horizontal, 3 = both.
-- Each enabled flip is applied independently with probability 0.5.
-- image.vflip/hflip write into their first (destination) argument.
local function flip(patch, mode)
   local out = patch
   if (mode == 1) or (mode == 3) then
      if torch.bernoulli(0.5) == 1 then
         flip_out1:typeAs(out):resizeAs(out)
         image.vflip(flatten3d(flip_out1), flatten3d(out))
         out = flip_out1
      end
   end
   if (mode == 2) or (mode == 3) then
      if torch.bernoulli(0.5) == 1 then
         flip_out2:typeAs(out):resizeAs(out)
         image.hflip(flatten3d(flip_out2), flatten3d(out))
         out = flip_out2
      end
   end
   return out
end

-- Random (hTarget x wTarget) crop of patch. If minMotion is given (videos
-- only), retry up to minMotionNTries times until the last two frames of the
-- crop differ by more than sqrt(minMotion * nElement) in L2 norm; after the
-- last try the crop is used regardless.
local function crop(patch, hTarget, wTarget, minMotion, minMotionNTries)
   local dimy, dimx = dimxy(patch)
   local h, w = patch:size(dimy), patch:size(dimx)
   assert((h >= hTarget) and (w >= wTarget))
   if (h == hTarget) and (w == wTarget) then
      return patch
   else
      if minMotion then
         assert(patch:dim() == 4)
         local x, y
         for i = 1, minMotionNTries do
            y = torch.random(1, h-hTarget+1)
            x = torch.random(1, w-wTarget+1)
            local cropped = patch:narrow(dimy, y, hTarget):narrow(dimx, x, wTarget)
            if (cropped[-1] - cropped[-2]):norm() > math.sqrt(minMotion * cropped[-1]:nElement()) then
               break
            end
         end
         -- uses the last tried (x, y), whether or not the motion test passed
         return patch:narrow(dimy, y, hTarget):narrow(dimx, x, wTarget)
      else
         local y = torch.random(1, h-hTarget+1)
         local x = torch.random(1, w-wTarget+1)
         return patch:narrow(dimy, y, hTarget):narrow(dimx, x, wTarget)
      end
   end
end

local scaleup_out = torch.Tensor()
-- Randomly upscale patch to a size uniformly drawn in [size, size*maxscale].
-- image.scale(dst, src, mode) writes into dst.
local function scaleup(patch, maxscale, mode)
   mode = mode or 'bilinear'
   local dimy, dimx = dimxy(patch)
   assert(maxscale >= 1)
   local h, w = patch:size(dimy), patch:size(dimx)
   local maxH, maxW = round(h*maxscale), round(w*maxscale)
   if (maxH == h) and (maxW == w) then
      return patch
   else
      local scaleH = torch.random(h, maxH)
      local scaleW = torch.random(w, maxW)
      if patch:dim() == 3 then
         scaleup_out:typeAs(patch):resize(patch:size(1), scaleH, scaleW)
      else
         scaleup_out:typeAs(patch):resize(patch:size(1), patch:size(2), scaleH, scaleW)
      end
      return image.scale(flatten3d(scaleup_out), flatten3d(patch), mode)
   end
end

local rotate_out = torch.Tensor()
-- Rotate patch by a random angle uniform in [-thetamax, thetamax] (radians).
-- image.rotate(dst, src, theta, mode) writes into dst.
local function rotate(patch, thetamax, mode)
   mode = mode or 'bilinear'
   assert(thetamax >= 0)
   if thetamax == 0 then
      return patch
   else
      local theta = torch.uniform(-thetamax, thetamax)
      rotate_out:typeAs(patch):resizeAs(patch)
      return image.rotate(flatten3d(rotate_out), flatten3d(patch), theta, mode)
   end
end

local input2_out = torch.Tensor()
-- Fetch a batch from the wrapped datasource and augment every sample:
-- flip, then rotate, then scale up, then crop to the target size.
function AugmentDatasource:nextBatch(batchSize, set)
   local input, target = self.datasource:nextBatch(batchSize, set)
   if input:dim() == 4 then
      -- image batch: (batch, channels, h, w)
      input2_out:resize(batchSize, input:size(2),
                        self.params.crop[1], self.params.crop[2])
   else
      -- video batch: (batch, frames, channels, h, w)
      input2_out:resize(batchSize, input:size(2), input:size(3),
                        self.params.crop[1], self.params.crop[2])
   end
   for i = 1, batchSize do
      local x = input[i]
      x = flip(x, self.params.flip)
      x = rotate(x, self.params.rotate)
      x = scaleup(x, self.params.scaleup)
      x = crop(x, self.params.crop[1], self.params.crop[2],
               self.params.cropMinimumMotion, self.params.cropMinimumMotionNTries)
      input2_out[i]:copy(x)
   end
   return self:typeResults(input2_out, target)
end

--This has NO data augmentation (you can't iterate over augmented data, it's infinite)
function AugmentDatasource:orderedIterator(batchSize, set)
   local it = self.datasource:orderedIterator(batchSize, set)
   return function()
      local input, label = it()
      if input ~= nil then
         return self:typeResults(input, label)
      else
         return nil
      end
   end
end
--------------------------------------------------------------------------------
/datasources/ucf101.lua:
--------------------------------------------------------------------------------
1 | --[[
2 | params:
3 | nInputFrames
4 | minimumMotion [nil]
5 | --]]
6 |
7 | require 'torch'
8 | require 'io'
9 | require 'paths'
10 | require 'thffmpeg'
11 | require 'math'
12 | require 'datasources.datasource'
13 |
-- Datasource for the UCF101 action recognition dataset.
-- Reads videos with THFFmpeg and returns batches of nInputFrames consecutive
-- frames rescaled to [-1, 1], plus class labels in 1..101.
local UCF101Datasource, parent = torch.class('UCF101Datasource', 'ClassDatasource')

-- params.nInputFrames: number of consecutive frames per sample (required)
-- params.datapath: dataset root [default 'datasources/ucf101/']
-- params.minimumMotion [nil]: if set, resample until the last two frames of
--   a clip differ by more than sqrt(minimumMotion * nElement) in L2 norm
function UCF101Datasource:__init(params)
   parent.__init(self)
   assert(params.nInputFrames ~= nil, "UCF101Dataset: must specify nInputFrames")
   self.datapath = params.datapath or 'datasources/ucf101/'
   local setfiles = {train = 'trainlist01.txt', test = 'testlist01.txt'}
   assert(paths.dirp(self.datapath), 'Path ' .. self.datapath .. ' does not exist')
   self.classes = {}
   self.sets = {train = {}, test = {}}
   for _, set in pairs{'train', 'test'} do
      local f = io.open(paths.concat(self.datapath, setfiles[set]), 'r')
      assert(f ~= nil, 'File ' .. paths.concat(self.datapath, setfiles[set]) .. ' not found.')
      for line in f:lines() do
         if string.byte(line:sub(-1,-1)) == 13 then
            --remove the windows carriage return
            line = line:sub(1,-2)
         end
         -- BUGFIX: classidx was an accidental global
         local filename, class, classidx
         if set == 'train' then
            -- train lines look like 'Class/v_Class_gXX_cYY.avi <classidx>'
            filename = line:sub(1, line:find(' ')-1)
            classidx = tonumber(line:sub(line:find(' ')+1, -1))
            class = filename:sub(1, filename:find('/')-1)
            self.classes[classidx] = class
         else
            -- test lines contain only the path
            filename = line
            class = filename:sub(1, filename:find('/')-1)
         end
         local avifile = filename:sub(filename:find('/')+1,-1)
         if self.sets[set][class] == nil then
            self.sets[set][class] = {}
         end
         table.insert(self.sets[set][class], avifile)
      end
      f:close()
      -- sanity check: each split must cover all 101 classes
      local n = 0
      for _, _ in pairs(self.sets[set]) do
         n = n + 1
      end
      assert(n == 101)
   end
   self.nbframes = {}  -- cache: filepath -> number of frames
   assert(#self.classes == 101)
   self.nInputFrames = params.nInputFrames
   self.minimumMotion = params.minimumMotion
   assert((self.minimumMotion == nil) or (self.minimumMotion > 0))
   self.nChannels, self.nClasses = 3, 101
   self.h, self.w = 240, 320
   self.thffmpeg = THFFmpeg()
end

-- True when frame1 and frame2 differ enough, or when no threshold is set.
function UCF101Datasource:testEnoughMotion(frame1, frame2)
   if self.minimumMotion == nil then
      return true
   else
      return (frame1 - frame2):norm() > math.sqrt(self.minimumMotion * frame1:nElement())
   end
end

-- Returns a batch of random (clip, label) samples drawn from the given set.
function UCF101Datasource:nextBatch(batchSize, set)
   assert(batchSize ~= nil, 'nextBatch: must specify batchSize')
   assert(self.sets[set] ~= nil, 'Unknown set ' .. set)
   self.output_cpu:resize(batchSize, self.nInputFrames, self.nChannels, self.h, self.w)
   self.labels_cpu:resize(batchSize)
   for i = 1, batchSize do
      local done = false
      while not done do
         -- pick a random class, then a random video of that class
         local iclass = torch.random(self.nClasses)
         local class = self.classes[iclass]
         local idx = torch.random(#self.sets[set][class])
         local filepath = paths.concat(self.datapath, class, self.sets[set][class][idx])
         -- NOTE(review): open() is called repeatedly without an explicit
         -- close; presumably THFFmpeg:open closes the previous file -- confirm
         local result = self.thffmpeg:open(filepath)
         if result then
            if self.nbframes[filepath] == nil then
               self.nbframes[filepath] = self.thffmpeg:length()
            end
            local nframes = self.nbframes[filepath]
            if nframes >= self.nInputFrames then
               self.labels_cpu[i] = iclass
               -- random temporal window of nInputFrames consecutive frames
               local istart = torch.random(nframes - self.nInputFrames + 1)
               self.thffmpeg:seek(istart-1)
               for j = 1, self.nInputFrames do
                  self.thffmpeg:next_frame(self.output_cpu[i][j])
               end
               -- resample unless the last two frames show enough motion
               done = self:testEnoughMotion(self.output_cpu[i][-2], self.output_cpu[i][-1])
            end
         else
            -- threadid_t is a global set by the worker threads (thread.lua)
            print("can't open", i, threadid_t, filepath)
         end
      end
   end
   self.thffmpeg:close()
   -- rescale byte frames [0, 255] to [-1, 1]
   self.output_cpu:mul(2/255):add(-1)
   return self:typeResults(self.output_cpu, self.labels_cpu)
end

-- Deterministic iterator over the whole set, cutting each video into
-- consecutive non-overlapping nInputFrames windows. The returned closure
-- yields (output, labels) and nil once every class/video is consumed.
function UCF101Datasource:orderedIterator(batchSize, set)
   assert(batchSize ~= nil, 'nextBatch: must specify batchSize')
   assert(self.sets[set] ~= nil, 'Unknown set ' .. set)
   local class_idx = 1
   local video_idx = 1
   local frame_idx = 1
   local thffmpeg2 = THFFmpeg()
   return function()
      self.output_cpu:resize(batchSize, self.nInputFrames, self.nChannels,
                             self.h, self.w)
      self.labels_cpu:resize(batchSize)
      for i = 1, batchSize do
         local done = false
         while not done do
            local class = self.classes[class_idx]
            local filepath = paths.concat(self.datapath, class, self.sets[set][class][video_idx])
            local goodvid = true
            if frame_idx == 1 then
               goodvid = thffmpeg2:open(filepath)
            end
            if goodvid then
               self.labels_cpu[i] = class_idx
               for j = 1, self.nInputFrames do
                  if not thffmpeg2:next_frame(self.output_cpu[i][j]) then
                     -- ran out of frames mid-sample: drop it and move on
                     done, goodvid = false, false
                     break
                  end
               end
               -- BUGFIX: only accept the sample when every frame was read.
               -- Previously `done = true` ran unconditionally after the loop,
               -- so a failed read still emitted a partially-filled sample.
               if goodvid then
                  done = true
                  frame_idx = frame_idx + self.nInputFrames
               end
            end
            if not goodvid then
               -- advance to the next video (and possibly the next class)
               video_idx = video_idx + 1
               if video_idx > #self.sets[set][class] then
                  class_idx = class_idx + 1
                  video_idx = 1
                  if class_idx > self.nClasses then
                     thffmpeg2:close()
                     return nil
                  end
               end
               frame_idx = 1
            end
         end
      end
      self.output_cpu:mul(2/255):add(-1)
      return self:typeResults(self.output_cpu, self.labels_cpu)
   end
end

-- Deterministic iterator yielding ONE sample (the first nInputFrames frames)
-- per video. Returns nil once every video has been visited.
function UCF101Datasource:orderedVideoIterator(batchSize, set)
   --returns only one sample (the first frames) per video
   assert(batchSize ~= nil, 'nextBatch: must specify batchSize')
   assert(self.sets[set] ~= nil, 'Unknown set ' .. set)
   local class_idx = 1
   local video_idx = 1
   local thffmpeg2 = THFFmpeg()
   return function()
      self.output_cpu:resize(batchSize, self.nInputFrames, self.nChannels,
                             self.h, self.w)
      self.labels_cpu:resize(batchSize)
      for i = 1, batchSize do
         local done = false
         while not done do
            done = true
            local class = self.classes[class_idx]
            local filepath = paths.concat(self.datapath, class, self.sets[set][class][video_idx])
            if not thffmpeg2:open(filepath) then
               done = false
            else
               self.labels_cpu[i] = class_idx
               for j = 1, self.nInputFrames do
                  if not thffmpeg2:next_frame(self.output_cpu[i][j]) then
                     done = false
                     break
                  end
               end
            end
            -- always advance: one sample per video at most
            video_idx = video_idx + 1
            if video_idx > #self.sets[set][class] then
               class_idx = class_idx + 1
               video_idx = 1
               if class_idx > self.nClasses then
                  thffmpeg2:close()
                  return nil
               end
            end
         end
      end
      self.output_cpu:mul(2/255):add(-1)
      return self:typeResults(self.output_cpu, self.labels_cpu)
   end
end
204 |
--------------------------------------------------------------------------------
/test-frame-prediction-on-ucf-rec_gdl.lua:
--------------------------------------------------------------------------------
1 | --[[
2 | July 2016
3 | Authors: Michael Mathieu, Camille Couprie
4 | Script to test 2 trained models to predict future frames in video from 4
5 | previous ones on a subset of the UCF101 test dataset.
6 | --]]
7 |
8 | require('torch')
9 | require('nngraph')
10 | require('image')
11 | --require('fbtorch')
12 | require('gfx.js')
13 | require('cunn')
14 | require('cudnn')
15 |
16 | paths.dofile('upsample.lua')
17 | paths.dofile('expand.lua')
18 | --dofile('ucf101.lua')
19 |
20 | torch.manualSeed(1)
21 | torch.setnumthreads(4)
22 | iscuda = false
23 | assert(loadfile("image_error_measures.lua"))(iscuda)
24 |
25 | opt_default = {
26 | full = false, -- display previous frames and target, otherwise the prediction
27 | with_pyr = true,
28 | with_delta = true,
29 | with_cuda = true,
30 | network_dir = 'AdvGDL',
31 | delay_gif = 25,
32 | totalNbiters=1,
33 | nChannels= 3,
34 | margin = 5, --for display
35 | nOutputFrames = 1,
36 | nOutputFramesRec = 2,
37 | interv = 1,
38 | flow_im_used=true
39 | }
40 |
41 | op = op or {}
42 | for k, v in pairs(opt_default) do
43 | if op[k] == nil then
44 | op[k] = v
45 | end
46 | end
47 |
-- Full UCF101 frame geometry; networks were trained on netsize x netsize
-- patches and are resized below to run fully-convolutionally at 240x320.
local inputH, inputW = 240, 320
local netsize = 64
opt = {batchsize = 1}

-- loading trained network

local flow_pth = 'UCF101frm10p/' -- folder of precomputed frames / flow masks
local predloaded
if op.network_dir=='Adv' then
   predloaded = torch.load('trained_models/new_adv_big_64_smalladv.t7')
elseif op.network_dir=='AdvGDL' then
   predloaded = torch.load('trained_models/new_adv_big_gdl_64.t7')
end
-- NOTE: this local `opt` (training-time options saved in the checkpoint)
-- shadows the global `opt` assigned just above for the rest of the file.
local opt = predloaded.opt
local model = predloaded.model
opt.nOutputFrames = 1
opt.batchsize = 1

------------------------------------------------------------------------------
-- init multiscale model with dsnet: a 4-level pyramid of average poolings
-- (1/8, 1/4, 1/2 and full resolution).  dsnetInput aliases dsnet;
-- dsnetTarget is an independent clone so both can hold distinct outputs.
local dsnet = nn.ConcatTable()
dsnet:add(nn.SpatialAveragePooling(8,8,8,8))
dsnet:add(nn.SpatialAveragePooling(4,4,4,4))
dsnet:add(nn.SpatialAveragePooling(2,2,2,2))
dsnet:add(nn.SpatialAveragePooling(1,1,1,1))
dsnet:cuda()
local dsnetInput = dsnet
local dsnetTarget = dsnet:clone()

--------------------------------------------------------------------------------
-- network size adaptation for models fine-tuned on larger patches:
-- rescale nn.ExpandDim / nn.View shapes from netsize^2 to inputH x inputW
for i = 1, #model.modules do
   if torch.type(model.modules[i]) == 'nn.ExpandDim' then
      local xH = math.floor(math.sqrt(model.modules[i].k) /netsize * inputH + 0.5)
      local xW = math.floor(math.sqrt(model.modules[i].k) /netsize * inputW + 0.5)
      model.modules[i].k = xH*xW
   end
   if torch.type(model.modules[i]) == 'nn.View' then
      if model.modules[i].numInputDims == 2 then
         local s1 = model.modules[i].size[1]
         local s2 = math.floor(model.modules[i].size[2] /netsize * inputH + 0.5)
         local s3 = math.floor(model.modules[i].size[3] /netsize * inputW + 0.5)
         model.modules[i].size = torch.LongStorage{s1, s2, s3}
         model.modules[i].numElements = s1*s2*s3
         --print(model.modules.size)
      end
   end
end

-- latent "delta" variables, one tensor per pyramid level, all zeroed
-- (deterministic prediction)
local delta = {torch.CudaTensor(opt.batchsize, 2):zero(),
               torch.CudaTensor(opt.batchsize, 4):zero(),
               torch.CudaTensor(opt.batchsize, 6):zero(),
               torch.CudaTensor(opt.batchsize, 8):zero()}
101 |
102 | ------------------------------------------------------------------------------
103 |
--- Display `nbframes` frames side by side (separated by white margins)
-- through gfx.js.  `my_array` is a stack of channel planes, 3 per frame.
function display_frames(my_array, nbframes)
   local h, w = my_array:size(2), my_array:size(3)
   -- white vertical separator, op.margin pixels wide
   local sep = torch.Tensor(op.nChannels, h, op.margin):fill(1)
   local frames = torch.Tensor(nbframes, op.nChannels, h, w)
   local strip = sep:clone()
   for f = 1, nbframes do
      for c = 1, op.nChannels do
         frames[f][c] = my_array[(f-1)*3 + c]
      end
      -- append the frame then a separator along the width axis
      strip = torch.cat(strip, frames[f], 3)
      strip = torch.cat(strip, sep, 3)
   end
   gfx.image(strip)
end
119 |
--- Save frames of `prediction` as <filename>/pred_<i>.png.
-- Frames are rescaled IN PLACE from [-1, 1] to [0, 1].  Predicted frames
-- (indices beyond opt.nInputFrames) are drawn inside a border canvas whose
-- first channel is set to 1 (red, assuming RGB ordering).
-- `nbframes` is unused; it is kept for call compatibility.
function save_frames(prediction, nbframes, filename)
   local function to01(img) return img:add(1):div(2) end

   for f = 1, opt.nInputFrames do
      image.save(filename .. '/pred_' .. f .. '.png', to01(prediction[f]))
   end

   -- border canvas: channel 1 at 1, the rest 0; interior overwritten below
   local framed = torch.Tensor(op.nChannels, inputH, inputW):fill(0)
   framed[1]:fill(1)
   local interior = {{}, {3, inputH-2}, {3, inputW-2}}
   for f = opt.nInputFrames + 1, opt.nInputFrames + op.nOutputFramesRec do
      to01(prediction[f])
      framed[interior] = prediction[f][interior]
      image.save(filename .. '/pred_' .. f .. '.png', framed)
   end
end
135 |
136 | ------------------------------------------------------------------------------
137 | -- Main job
138 |
139 | local sum_PSNR=torch.Tensor(op.nOutputFramesRec):fill(0)
140 | local sum_err_sharp2=torch.Tensor(op.nOutputFramesRec):fill(0)
141 | local sum_SSIM=torch.Tensor(op.nOutputFramesRec):fill(0)
142 | local nbimagestosave = op.nOutputFramesRec+opt.nInputFrames
143 | local array_to_save= torch.Tensor(nbimagestosave,op.nChannels,inputH,inputW)
144 | local target_to_save =
145 | torch.Tensor(op.nOutputFramesRec,op.nChannels,inputH,inputW)
146 |
147 | local input, output, target
148 | local batch=1
149 | local nbvideos = 3783
150 | local nbframes, nbpartvid
151 | local nbvid = torch.Tensor(op.nOutputFramesRec):fill(0)
152 |
153 | local index =
154 | torch.range(1,(opt.nInputFrames+op.nOutputFramesRec)*op.interv, op.interv)
155 |
156 |
157 | for videoidx = 1,nbvideos,10 do
158 | --local vid, label --= datasets[set]:nextTestImage(videoidx)
159 | local vid =
160 | torch.Tensor(opt.nInputFrames+ op.nOutputFramesRec, op.nChannels, 240,320)
161 | for fr=1,opt.nInputFrames do
162 | im_name = flow_pth..videoidx..'/pred_'..fr..'.png'
163 | vid[fr] = (image.load(im_name))
164 | end
165 | for fr = 1,op.nOutputFramesRec do
166 | im_name = flow_pth..videoidx..'/target_'..fr..'.png'
167 | vid[fr+opt.nInputFrames] = (image.load(im_name))
168 | end
169 |
170 | vid:mul(2):add(-1)
171 | nbframes = vid:size(1)
172 | nbpartvid = torch.abs(nbframes/opt.nInputFrames)
173 |
174 | local filename_out = op.network_dir..'/'..videoidx
175 | for ii = 1,op.nOutputFramesRec do
176 |
177 | -- extract the first frames
178 | input = vid[{{1 , opt.nInputFrames}}]
179 | for f=1,opt.nInputFrames-ii+1 do
180 | input[f] = vid[index[ii+f-1]]
181 | end
182 | for j=1,ii-1 do
183 | if j> opt.nInputFrames then break end
184 | input[opt.nInputFrames+1-j] = array_to_save[ii-j+opt.nInputFrames]
185 | end
186 | target = torch.Tensor(op.nOutputFrames, op.nChannels, 240,320)
187 | for f=1,op.nOutputFrames do
188 | target[f] = vid[index[opt.nInputFrames+ii+f-1]]
189 | end
190 |
191 | input = input:view(1, op.nChannels*opt.nInputFrames,
192 | input:size(3), input:size(4))
193 | target = target:view(1, op.nChannels*opt.nOutputFrames,
194 | target:size(3), target:size(4))
195 | if op.with_pyr == true then
196 | input = dsnetInput:forward(input:cuda())
197 | target = dsnetTarget:forward(target:cuda())
198 | end
199 | if op.with_delta == true then
200 | output = model:forward({input, delta})[1]
201 | elseif op.with_pyr == false then
202 | output = model:forward(input:cuda())
203 | else
204 | output = model:forward(input)
205 | end
206 | if op.with_pyr == true then
207 | output = output[4] -- the largest scale output[1][4]
208 | end
209 | output = output:double()
210 | if op.with_pyr == true then
211 | input = input[4]
212 | input = input[{{1},{},{},{}}]:float()
213 | target = target[4]:double()
214 | end
215 | output = output[batch]
216 |
217 | -- replace target and input in same space than the output
218 | target = target[batch]
219 |
220 | if ii==1 then
221 | array_to_save[{{1,opt.nInputFrames}}]=input
222 | end
223 | array_to_save[opt.nInputFrames+ii]=output -- target
224 |
225 | -- extract moving pixels for SNR computations
226 | if op.flow_im_used then
227 | local flow_im_name
228 | local moutput = torch.Tensor(3,240,320):fill(-1)
229 | local mtarget = torch.Tensor(3,240,320):fill(-1)
230 | if ii==1 then
231 | flow_im_name = flow_pth..videoidx..'/pred_4_flow.png'
232 | else
233 | flow_im_name = flow_pth..videoidx..'/target_'..(ii-1)..'_flow.png'
234 | end
235 |
236 | local flow_im = image.load(flow_im_name)
237 | local s = output[{{1,3}}]:size()
238 |
239 | for j=1, s[2] do
240 | for k=1, s[3] do
241 | if flow_im[1][j][k]< 0.2 or flow_im[2][j][k]< 0.2
242 | or flow_im[3][j][k]< 0.2 then -- moving
243 | for i=1,s[1] do
244 | moutput[i][j][k] = output[i][j][k]
245 | mtarget[i][j][k] = target[i][j][k]
246 | end
247 | end
248 | end
249 | end
250 |
251 | local psnr = PSNR(moutput, mtarget)
252 | if psnr < 50 then
253 | sum_PSNR[ii] = sum_PSNR[ii]+psnr
254 | sum_SSIM[ii] = sum_SSIM[ii]+SSIM(moutput, mtarget)
255 | sum_err_sharp2[ii] = sum_err_sharp2[ii] +
256 | computel1difference(moutput, mtarget)
257 | nbvid[ii] = nbvid[ii]+1
258 | end
259 | else
260 | sum_PSNR[ii] = sum_PSNR[ii]+PSNR(output[{{1,3}}], target[{{1,3}}])
261 | sum_SSIM[ii] = sum_SSIM[ii]+SSIM(output[{{1,3}}], target[{{1,3}}])
262 | sum_err_sharp2[ii] = sum_err_sharp2[ii] +
263 | computel1difference(output[{{1,3}}], target[{{1,3}}])
264 | nbvid[ii] = nbvid[ii]+1
265 | end
266 | end --for ii = 1,op.nOutputFramesRec
267 |
268 | print(filename_out)
269 | os.execute('mkdir -p "' .. filename_out .. '"; ')
270 | save_frames(array_to_save, nbimagestosave, filename_out)
271 |
272 | for i= 1,op.nOutputFramesRec do
273 | print('******** video '..videoidx..', '..i..' th frame pred *************')
274 | print(string.format("score sharp diff: %.2f",sum_err_sharp2[i]/nbvid[i]))
275 | print(string.format("PSNR: %.2f",sum_PSNR[i]/nbvid[i]))
276 | print(string.format("SSIM: %.2f",sum_SSIM[i]/nbvid[i]))
277 | end
278 |
279 | os.execute('convert $(for ((a=1; a<'..nbimagestosave..
280 | '; a++)); do printf -- "-delay '..op.delay_gif..' '..filename_out..
281 | '/pred_%s.png " $a; done;) '..filename_out..'result.gif')
282 |
283 | end --for videoidx = 1,nbvideos,10
284 |
285 |
--------------------------------------------------------------------------------
/get_model.lua:
--------------------------------------------------------------------------------
1 | require('nngraph')
2 | require('cunn')
3 | require('cudnn')
4 | require('nnx')
5 |
6 |
--- Build a convolutional network (nngraph gModule) from a compact table
-- description.  Each entry of `struct` describes one layer:
--   {'conv',  k, nOut[, stride]} : valid convolution, kernel k x k
--   {'convp', k, nOut}           : size-preserving (padded) convolution, odd k
--   {'maxpool', k, stride}       : max pooling
--   {'fc', nOut}                 : linear layer (flattens spatial dims first)
--   {'feature'}                  : tags the current activation as the
--                                  auxiliary "feature" output
--   {'spatialbatchnorm'}         : spatial batch normalization
-- A nil output size is allowed only in the LAST layer and falls back to
-- nOutputChannels (conv/convp) or nOutputElements (fc).  A ReLU is inserted
-- after every entry except the last.
-- Returns: net, nElements          (if the net ends in FC mode)
--          net, nChannels, h, w    (otherwise)
local function getConvNet(struct, nChannels, h, w, nOutputChannels, nOutputElements)
   local isInFCMode, nElements = false, nil
   local input = nn.Identity()()
   local x = nn.Identity()(input)
   local feature = nil
   for i = 1, #struct do
      if struct[i][1] == 'conv' then
         local nOutputs = struct[i][3] or nOutputChannels
         assert(not isInFCMode) -- no convolutions after FC
         assert(nOutputs ~= nil) -- no nil if nOutputChannels is nil
         assert((struct[i][3] ~= nil) or (i == #struct)) -- no nil except in last layer
         x = cudnn.SpatialConvolution(nChannels, nOutputs,
                                      struct[i][2], struct[i][2],
                                      struct[i][4], struct[i][4]):cuda()(x)
         -- track output geometry (stride defaults to 1 when struct[i][4] is nil)
         if struct[i][4] ~= nil then
            nChannels, h, w = nOutputs, math.floor((h - struct[i][2])/struct[i][4]) + 1, math.floor((w - struct[i][2])/struct[i][4]) + 1
         else
            nChannels, h, w = nOutputs, h - struct[i][2] + 1, w - struct[i][2] + 1
         end
      elseif struct[i][1] == 'convp' then
         local nOutputs = struct[i][3] or nOutputChannels
         assert(struct[i][2] % 2 == 1) -- no even kernel sizes when padding!
         assert(not isInFCMode) -- no convolutions after FC
         assert(nOutputs ~= nil) -- no nil if nOutputChannels is nil
         assert((struct[i][3] ~= nil) or (i == #struct)) -- no nil except in last layer
         x = cudnn.SpatialConvolution(nChannels, nOutputs,
                                      struct[i][2], struct[i][2],
                                      1, 1, (struct[i][2]-1)/2,
                                      (struct[i][2]-1)/2):cuda()(x)
         nChannels = nOutputs
      elseif struct[i][1] == 'maxpool' then
         assert(not isInFCMode) -- no pooling after FC
         x = cudnn.SpatialMaxPooling(struct[i][2], struct[i][2],
                                     struct[i][3], struct[i][3])(x)
         h = math.floor((h - struct[i][2])/struct[i][3] + 1)
         w = math.floor((w - struct[i][2])/struct[i][3] + 1)
      elseif struct[i][1] == 'fc' then
         local nOutputs = struct[i][2] or nOutputElements
         assert(nOutputs ~= nil) -- no nil if nOutputElements is nil
         assert((struct[i][2] ~= nil) or (i == #struct)) -- no nil except in last layer
         if not isInFCMode then
            -- first FC layer: flatten (C, H, W) into a vector
            nElements = h*w*nChannels
            x = nn.View(nElements):setNumInputDims(3)(x)
            isInFCMode = true
         end
         x = nn.Linear(nElements, nOutputs):cuda()(x)
         nElements = nOutputs
      elseif struct[i][1] == 'feature' then
         assert(feature == nil) -- only one feature layer (for now)
         -- tap is taken before the ReLU inserted below
         feature = x
      elseif struct[i][1] == 'spatialbatchnorm' then
         x = nn.SpatialBatchNormalization(nChannels)(x)
      else
         error('Unknown network element ' .. struct[i][1])
      end
      if i ~= #struct then
         x = nn.ReLU()(x)
      end
   end
   local net = nn.gModule({input}, {x, feature})
   if isInFCMode then
      return net, nElements
   else
      return net, nChannels, h, w
   end
end
73 |
--- Build the multi-scale (coarse-to-fine) prediction model.
-- The gModule input is a table of input-frame stacks, one per entry of
-- opt.scaleList (coarsest first).  Each scale's subnet sees the frames at
-- that scale concatenated with the upsampled prediction of the previous
-- (coarser) scale, and predicts a residual that is added to the upsampled
-- guess before a final Tanh.
-- `in_modules` optionally supplies prebuilt per-scale subnets (keyed by
-- scale); new ones are created via getConvNet otherwise.
-- Returns the gModule (outputs: {predictions, features}, one entry per
-- scale) and the table of per-scale subnets for later reuse.
-- NOTE(review): `model` is assigned without `local` and thus becomes a
-- global — presumably relied upon elsewhere; confirm before changing.
function getPyrModel(opt, dataset, in_modules)
   -- assume input/target is between -1 and 1
   local out_modules = {}
   local function getPred(imagesScaled, inputGuess, scale, scaleRatio, in_module)
      -- input: images(scale res), guess(scale/2 res)
      local ws, hs = opt.w / scale, opt.h / scale
      local guessScaled, x = nil, nil
      local nInputChannels = opt.nInputFrames*dataset.nChannels
      if inputGuess ~= nil then
         -- upsample the coarser prediction and stack it with the inputs
         guessScaled = nn.SpatialUpSamplingNearest(scaleRatio)(inputGuess)
         nInputChannels = nInputChannels +opt.nTargetFrames*dataset.nChannels
         x = nn.JoinTable(2){imagesScaled, guessScaled}
      else
         x = imagesScaled
      end
      local mod = in_module
      if not mod then
         mod = getConvNet(opt.modelStruct[scale], nInputChannels,
                          hs, ws, opt.nTargetFrames*dataset.nChannels)
      end
      mod = mod:cuda()
      x = mod(x)
      out_modules[scale] = mod
      local x, features = x:split(2)
      if inputGuess ~= nil then
         -- residual connection: prediction = subnet output + upsampled guess
         x = nn.CAddTable(){x, guessScaled}
      end
      x = nn.Tanh()(x)
      return x, features
   end

   local inputImages = nn.Identity()()
   local pred, features = {}, {}
   for i = 1, #opt.scaleList do
      local scale = opt.scaleList[i]
      local mod = nil
      if in_modules then
         mod = in_modules[scale]
      end
      pred[i], features[i] =
         getPred(nn.SelectTable(i)(inputImages),
                 pred[i-1], --nil if i == 0, on purpose
                 scale,
                 -- scaleRatio evaluates to `true` when i == 1, but it is
                 -- never used then (inputGuess is nil at the coarsest scale)
                 (i == 1) or (opt.scaleList[i-1] / scale),
                 mod)
   end
   pred = nn.Identity()(pred)
   features = nn.Identity()(features)
   model = nn.gModule({inputImages}, {pred, features})
   model = model:cuda()
   return model, out_modules
end
126 |
--- Unroll `model` opt.nRec times into a recurrent multi-step predictor.
-- All clones share parameters ('weight','bias','gradWeight','gradBias').
-- After each step, at every pyramid scale, the oldest input frame is
-- dropped and the step's prediction appended to form the next step input.
-- Returns a gModule producing one prediction table per step; a single
-- step is wrapped in a ConcatTable so the output is still a table.
function getRecModel(opt, model, datasource)
   assert(opt.h == opt.w) -- the per-scale reshapes below assume square patches
   local input = nn.Identity()()
   local output = {}
   local lastinput = input
   for i = 1, opt.nRec do
      -- weight-sharing clone of the single-step model
      local netoutput = model:clone('weight', 'bias', 'gradWeight', 'gradBias')(lastinput)
      netoutput = nn.SelectTable(1)(netoutput)
      output[i] = netoutput
      if i ~= opt.nRec then
         local newinput = {}
         for j = 1, #opt.scaleList do
            local npix = opt.h / opt.scaleList[j]
            -- split frames back out of the channel dim, drop the oldest one
            local x1 = nn.SelectTable(j)(lastinput)
            x1 = nn.View(opt.batchsize, opt.nInputFrames, datasource.nChannels, npix, npix)(x1)
            x1 = nn.Narrow(2, 2, opt.nInputFrames-1)(x1)
            -- append this step's prediction as the newest frame
            local x2 = nn.SelectTable(j)(netoutput)
            x2 = nn.View(opt.batchsize, 1, datasource.nChannels, npix, npix)(x2)
            local y = nn.JoinTable(2){x1, x2}
            newinput[j] =
               nn.View(opt.batchsize, opt.nInputFrames*datasource.nChannels, npix, npix)(y)
         end
         lastinput = newinput
      end
   end
   if #output == 1 then
      -- wrap so callers always receive a table of step outputs
      local dummy = nn.ConcatTable()
      dummy:add(nn.Identity())
      output = dummy(output)
      return nn.gModule({input}, {output}):cuda()
   else
      return nn.gModule({input}, output):cuda()
   end
end
161 |
--- Build the multi-scale adversarial discriminator.
-- Takes {inputImages, predictions} (one entry per scale), joins them
-- along the channel dimension at each scale, and scores each with a
-- convnet described by opt.advStruct followed by a Sigmoid.
-- Note: `advmodel` is kept global, matching the original definition.
function getPyrAdv(opt, dataset)
   local imagesIn = nn.Identity()()
   local predIn = nn.Identity()()
   local heads = {}
   local nChan = (opt.nInputFrames+opt.nTargetFrames)*dataset.nChannels
   for i, scale in ipairs(opt.scaleList) do
      assert(opt.advStruct[scale] ~= nil) -- model and adv must have same scales
      local joined = nn.JoinTable(2){nn.SelectTable(i)(imagesIn),
                                     nn.SelectTable(i)(predIn)}
      local score = getConvNet(opt.advStruct[scale], nChan,
                               opt.w / scale, opt.h / scale, nil, 1)(joined)
      heads[i] = nn.Sigmoid()(score)
   end

   advmodel = nn.gModule({imagesIn, predIn}, heads)
   advmodel = advmodel:cuda()
   return advmodel
end
180 |
--- Unroll the adversarial discriminator opt.nRec times.
-- Exactly one of `advmodel` / `in_modules` must be given: either clone a
-- fresh discriminator per step (shared weights if opt.advshare), or reuse
-- previously built per-step modules, extending with clones of the last.
-- After each step the oldest input frame is replaced by that step's
-- prediction, mirroring getRecModel.
-- Note: `out_modules` is kept global, matching the original definition.
function getRecAdv(opt, advmodel, datasource, in_modules)
   assert((advmodel == nil) ~= (in_modules == nil))
   local input1 = nn.Identity()()
   local input2 = nn.Identity()()
   local output = {}
   local input1b = input1
   out_modules = {}
   for i = 1, opt.nRec do
      local input2b = nn.SelectTable(i)(input2)
      local mod = nil
      if advmodel ~= nil then
         if opt.advshare == true then
            mod = advmodel:clone('weight', 'bias', 'gradWeight', 'gradBias')
         else
            mod = advmodel:clone()
            print("====================================================================")
            print("=================        CLONING ADVMODEL         =====================")
            print("====================================================================")
         end
      else
         if in_modules[i] ~= nil then
            mod = in_modules[i]
         else
            if opt.advshare == true then
               mod = in_modules[#in_modules]:clone('weight', 'bias', 'gradWeight', 'gradBias')
               print("====================================================================")
               print("=================     SHARING LAST ADVMODEL       =====================")
               print("====================================================================")
            else
               mod = in_modules[#in_modules]:clone()
               print("====================================================================")
               print("=================     CLONING LAST ADVMODEL       =====================")
               print("====================================================================")
            end
         end
      end
      -- FIX: the loop variable was named `i`, shadowing the outer unroll
      -- index; the index is unused, so use `_` to avoid confusion.
      for _, node in ipairs(mod.backwardnodes) do
         --TODO: somehow :cuda() fails otherwise
         node.data.gradOutputBuffer = nil
      end
      out_modules[i] = mod
      output[i] = mod{input1b, input2b}
      if i ~= opt.nRec then
         local newinput1b = {}
         for j = 1, #opt.scaleList do
            local npix = opt.h / opt.scaleList[j]
            -- drop the oldest frame, append this step's prediction
            local x1 = nn.SelectTable(j)(input1b)
            x1 = nn.View(opt.batchsize, opt.nInputFrames, datasource.nChannels, npix, npix)(x1)
            x1 = nn.Narrow(2, 2, opt.nInputFrames-1)(x1)
            local x2 = nn.SelectTable(j)(input2b)
            x2 = nn.View(opt.batchsize, 1, datasource.nChannels, npix, npix)(x2)
            local y = nn.JoinTable(2){x1, x2}
            newinput1b[j] =
               nn.View(opt.batchsize, opt.nInputFrames*datasource.nChannels, npix, npix)(y)
         end
         input1b = nn.Identity()(newinput1b)
      end
   end
   if #output == 1 then
      -- wrap so callers always receive a table of step outputs
      local dummy = nn.ConcatTable()
      dummy:add(nn.Identity())
      output = dummy(output)
      return nn.gModule({input1, input2}, {output}):cuda(), out_modules
   else
      return nn.gModule({input1, input2}, output):cuda(), out_modules
   end
end
248 |
--- Build the input preprocessor: for each scale, flatten the frame
-- dimension into channels (via a FunctionWrapper view) and downsample
-- with average pooling.  Returns a CUDA ConcatTable producing one tensor
-- per entry of opt.scaleList.
function getPyrPreprocessor(opt, dataset)
   local net = nn.ConcatTable()
   for i = 1, #opt.scaleList do
      local net2 = nn.Sequential()
      net:add(net2)
      -- view (B, F, C, H, W) as (B, F*C, H, W); backward restores the shape
      net2:add(nn.FunctionWrapper(
          function(self) end,
          function(self, input)
             return input:view(input:size(1),
                               -1, input:size(input:dim()-1),
                               input:size(input:dim()))
          end,
          function(self, input, gradOutput)
             return gradOutput:viewAs(input)
          end))
      -- BUGFIX: `scale` was assigned without `local`, leaking an accidental
      -- global that could clobber (or be clobbered by) other code.
      local scale = opt.scaleList[i]
      net2:add(nn.SpatialAveragePooling(scale, scale, scale, scale))
   end
   net:cuda()
   return net
end
270 |
271 | -- replicated the criterion into a sort of parallel criterion
272 | -- TODO: is this used?
-- replicated the criterion into a sort of parallel criterion
-- TODO: is this used?
-- Wraps one clone of simpleCriterion per scale; forward() downsamples the
-- target through the internal pyramid before comparing.
function getPyrCriterion(opt, simpleCriterion)
   local crit = nn.ParallelCriterion()
   local dsnet = nn.ConcatTable()
   for _, s in ipairs(opt.scaleList) do
      crit:add(simpleCriterion:clone())
      dsnet:add(nn.SpatialAveragePooling(s, s, s, s))
   end
   crit:cuda()
   dsnet:cuda()

   local wrapper = { criterion = crit, dsnet = dsnet }
   function wrapper:forward(input, target)
      return self.criterion:forward(input, self.dsnet:forward(target))
   end
   function wrapper:updateGradInput(input, target)
      -- reuses the pyramid computed by the preceding forward()
      return self.criterion:backward(input, self.dsnet.output)
   end
   wrapper.backward = wrapper.updateGradInput
   return wrapper
end
295 |
-- Gradient Difference Loss (GDL): penalizes, in L1, the difference between
-- one-pixel-shifted finite differences of the prediction and of the target.
GDL, gdlparent = torch.class('nn.GDLCriterion', 'nn.Criterion')

--- alpha: exponent of the gradient-difference terms (only 1 is supported).
function GDL:__init(alpha)
   gdlparent:__init(self)
   self.alpha = alpha or 1
   -- BUGFIX: assert on the resolved value.  Asserting on the raw argument
   -- rejected the legitimate default call nn.GDLCriterion() (alpha == nil),
   -- even though self.alpha had already defaulted to 1.
   assert(self.alpha == 1) --for now
   local Y = nn.Identity()()
   local Yhat = nn.Identity()()
   -- one-pixel crops of target (Y*) and prediction (Yhat*) used to form
   -- the finite-difference terms below
   local Yi1 = nn.SpatialZeroPadding(0,0,0,-1)(Y)
   local Yj1 = nn.SpatialZeroPadding(0,0,-1,0)(Y)
   local Yi2 = nn.SpatialZeroPadding(0,-1,0,0)(Y)
   local Yj2 = nn.SpatialZeroPadding(-1,0,0,0)(Y)
   local Yhati1 = nn.SpatialZeroPadding(0,0,0,-1)(Yhat)
   local Yhatj1 = nn.SpatialZeroPadding(0,0,-1,0)(Yhat)
   local Yhati2 = nn.SpatialZeroPadding(0,-1,0,0)(Yhat)
   local Yhatj2 = nn.SpatialZeroPadding(-1,0,0,0)(Yhat)
   local term1 = nn.Abs()(nn.CSubTable(){Yi2, Yi1})
   local term2 = nn.Abs()(nn.CSubTable(){Yhati2, Yhati1})
   local term3 = nn.Abs()(nn.CSubTable(){Yj2, Yj1})
   local term4 = nn.Abs()(nn.CSubTable(){Yhatj2, Yhatj1})
   -- |grad(Y)| - |grad(Yhat)| for each direction; measured against zero
   -- targets with AbsCriterion in updateOutput
   local term12 = nn.CSubTable(){term1, term2}
   local term34 = nn.CSubTable(){term3, term4}
   self.net = nn.gModule({Yhat, Y}, {term12, term34})
   self.net:cuda()
   self.crit = nn.ParallelCriterion()
   self.crit:add(nn.AbsCriterion())
   self.crit:add(nn.AbsCriterion())
   self.crit:cuda()
   -- zero-target buffers, resized lazily in updateOutput
   self.target1 = torch.CudaTensor()
   self.target2 = torch.CudaTensor()
end
327 |
--- Forward pass: run the gradient-difference graph on {input, target} and
-- measure both difference maps against zero targets with L1 criteria.
-- Stores netoutput/target for reuse in updateGradInput.
function GDL:updateOutput(input, target)
   self.netoutput = self.net:updateOutput{input, target}
   -- zero targets, lazily resized to match the current output shapes
   self.target1:resizeAs(self.netoutput[1]):zero()
   self.target2:resizeAs(self.netoutput[2]):zero()
   self.target = {self.target1, self.target2}
   self.loss = self.crit:updateOutput(self.netoutput, self.target)
   return self.loss
end
336 |
--- Backward pass; must follow updateOutput (reuses self.netoutput and
-- self.target).  Only the gradient w.r.t. `input` (the first graph input)
-- is returned; the gradient w.r.t. `target` is discarded.
function GDL:updateGradInput(input, target)
   local gradInput =
      self.crit:updateGradInput(self.netoutput, self.target)
   self.gradInput =
      self.net:updateGradInput({input, target}, gradInput)[1]
   return self.gradInput
end
344 |
--------------------------------------------------------------------------------
/train_iclr_model.lua:
--------------------------------------------------------------------------------
1 | --[[
2 | Trains an L2 + adversarial network (can be only L2 by setting advweight to 0)
3 | to predict next frame.
4 | The network uses a multi-resolution pyramid (hardcoded to 4 levels for now)
5 | Uses latent variable in additive mode at each level
6 | Supports sgd and adagrad optimization
7 | --]]
8 |
9 | require('torch')
10 | require('optim')
11 | require('get_model')
12 | require 'gfx.js'
13 |
14 | nngraph.setDebug(false)
15 | gfx.verbose = false
16 | torch.setnumthreads(2)
17 | torch.manualSeed(1)
18 |
-- Default training options (global `opt`); any field already set in a
-- pre-existing global `opt` takes precedence (see merge loop below).
opt_default = {
   -- general
   devid = 2, -- GPU id
   saveName = 'model.t7', -- save file name
   loadName = '', -- checkpoint to resume from ('' = train from scratch)
   loadOpt=false, -- also restore options from the checkpoint
   dataset = 'ucf101', -- dataset name
   -- training
   nEpoches = 10000, -- number of "epoches" per training
   nIters = 100, -- number of minibatches per "epoch"
   batchsize = 8, -- number of samples per minibatches
   -- model
   h = 32,
   w = 32, -- size of the patches
   modelOptim = 'sgd', -- delta(adadelta), grad(adagrad) or sgd
   modelConfig = {
      learningRate = 0.02,
      --learningRateDecay = 0,
      --weightDecay = 0,
      --momentum = 0
   },
   nInputFrames = 4, -- number of *input* frames (excluding target)
   nTargetFrames = 1, -- number of frames to predict

   -- generator structure per pyramid scale (keys are downsampling factors);
   -- layer format is described in getConvNet (get_model.lua).  The nil
   -- output size of each last layer defaults to the predicted channel count.
   modelStruct = {
      [8] = {
         {'convp', 3, 16},
         {'convp', 3, 32},
         {'feature'},
         {'convp', 3, 16},
         {'convp', 3, nil}},
      [4] = {
         {'convp', 5, 16},
         {'convp', 3, 32},
         {'feature'},
         {'convp', 3, 16},
         {'convp', 5, nil}},
      [2] = {
         {'convp', 5, 16},
         {'convp', 3, 32},
         {'convp', 3, 64},
         {'feature'},
         {'convp', 3, 32},
         {'convp', 3, 16},
         {'convp', 5, nil}},
      [1] = {
         {'convp', 7, 16},
         {'convp', 5, 32},
         {'convp', 5, 64},
         {'feature'},
         {'convp', 5, 32},
         {'convp', 5, 16},
         {'convp', 7, nil}}},
   -- adv
   advOptim = 'sgd', -- see modelOptim
   advConfig = {
      learningRate = 0.02,
   },
   l2weight = 1, -- L2 weight in the loss
   advweight = 0.01, -- adversarial weight in the loss
   advNIter = 1, -- number of adversarial training iterations
   advExt = 'full', -- extend adv training to fake "real" examples [none|cheap|full]
   -- discriminator structure per pyramid scale (same format as modelStruct)
   advStruct = {
      [8] = {
         {'conv', 3, 32},
         {'fc', 256},
         {'fc', 128},
         {'fc', nil}},
      [4] = {
         {'conv', 3, 32},
         {'conv', 3, 32},
         {'conv', 3, 64},
         {'fc', 256},
         {'fc', 128},
         {'fc', nil}},
      [2] = {
         {'conv', 5, 32},
         {'conv', 5, 32},
         {'conv', 5, 64},
         {'fc', 256},
         {'fc', 128},
         {'fc', nil}},
      [1] = {
         {'conv', 7, 32},
         {'conv', 7, 32},
         {'conv', 5, 64},
         {'conv', 5, 128},
         {'maxpool', 2, 2},
         --TODO: shared weights with two last layers
         {'fc', 256},
         {'fc', 128},
         {'fc', nil}},
   },
}
-- fill in any option the caller did not override
opt = opt or {}
for k, v in pairs(opt_default) do
   if opt[k] == nil then
      opt[k] = v
   end
end
-- optimizer states, created lazily by the training loop
modelState = nil
advState = nil
-- sanity check: adversarial weight and iteration count must agree
-- (advweight == 0 iff advNIter == 0)
assert((opt.advweight == 0) ~= (opt.advNIter ~= 0)) -- if not, it's probably a mistake
122 |
cutorch.setDevice(opt.devid)

-- Optionally resume model / discriminator from a checkpoint.  The
-- discriminator is discarded when the patch size changed (its FC layers
-- would no longer match).
loaded = {}
if opt.loadName ~= '' then
   loaded = torch.load(opt.loadName)
   model = loaded.model
   advmodel = loaded.advmodel
   if loaded.opt.h ~= opt.h then
      advmodel = nil
   end
end
if opt.loadOpt == true then
   local oldopt = opt
   opt = loaded.opt
   --opt.devid = oldopt.devid
   --opt.saveName = oldopt.saveName
   -- NOTE(review): `opt_override` is not defined anywhere in this file —
   -- presumably it is set by the launching environment before running this
   -- script; if unset, loadOpt=true crashes here.  Confirm.
   for k, v in pairs(opt_override) do
      opt[k] = v
   end
end
143 |
-- Patch size and datasource setup.
-- BUGFIX: assign w from opt.w and h from opt.h; the original wrote
-- 'local w, h = opt.h, opt.w', swapping them — latent only because the
-- default patches are square (32x32), but wrong for h ~= w (the crop
-- below is {h, w}).
local w, h = opt.w, opt.h
local winput = w
local hinput = h
if opt.dataset == 'sports1m' then
   error("no sports1m dataset")
elseif opt.dataset == 'ucf101' then
   require('datasources.thread')
   local optt = opt -- need local var, opt is global
   -- The datasource runs in donkey threads; each thread constructs its own
   -- UCF101 reader (input + target frames) wrapped in a random-crop
   -- augmenter.  `dataset` is intentionally global.
   dataset = ThreadedDatasource(
      function()
         require('datasources.augment')
         require('datasources.ucf101')
         local ucfdataset = UCF101Datasource{
            nInputFrames = optt.nInputFrames+optt.nTargetFrames
         }
         return AugmentDatasource(ucfdataset, {crop = {h, w}})
      end, {nDonkeys = 8})
   dataset:cuda()
else
   error("Unknown dataset " .. opt.dataset)
end
165 |
166 | opt.scaleList = {}
167 | for k, v in pairs(opt.modelStruct) do
168 | opt.scaleList[1+#opt.scaleList] = k
169 | end
170 | table.sort(opt.scaleList, function(a,b) return b
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------