├── LICENSE ├── README.md └── factorised_blp.py /LICENSE: -------------------------------------------------------------------------------- 1 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 2 | Version 2, December 2004 3 | 4 | Copyright (C) 2021 Thomas Winterbottom 5 | 6 | Everyone is permitted to copy and distribute verbatim or modified 7 | copies of this license document, and changing it is allowed as long 8 | as the name is changed. 9 | 10 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 11 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 12 | 13 | 0. You just DO WHAT THE FUCK YOU WANT TO. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Low Rank Bilinear Pooling 2 | Low rank bilinear pooling in torch and keras. Frankly there are now more useful implementations, but I'm happy to leave this one here if its simplicity happens to suit anyone better. 
import torch
import torch.nn as nn
import torch.nn.functional as F


class fblp(nn.Module):
    """Factorised bilinear pooling (MFB-style) over two input vectors.

    Projects x and y into a shared joint embedding of size k*o, fuses them
    with an element-wise product, sum-pools over the factor dimension k,
    then applies a signed square root and row-wise L2 normalisation.

    Expected fields on ``options`` (replace with your own config object):
        pool_k   -- K parameter from the paper (e.g. 5)
        pool_o   -- o parameter from the paper (e.g. 1000)
        xpool_in -- input size of x (e.g. 1024)
        ypool_in -- input size of y (e.g. 1024)
    """

    def __init__(self, options):
        super(fblp, self).__init__()
        joint_emb_size = options.pool_k * options.pool_o
        self.options = options
        self.xproj = nn.Linear(options.xpool_in, joint_emb_size)
        self.yproj = nn.Linear(options.ypool_in, joint_emb_size)

    def forward(self, x, y):
        """Fuse (batch, xpool_in) x and (batch, ypool_in) y into (batch, pool_o).

        BUG FIX: removed a leftover ``import ipdb; ipdb.set_trace()`` debug
        breakpoint that halted every forward pass (and required a package
        this module never imports).
        """
        x = self.xproj(x)                       # (batch, k*o)
        y = self.yproj(y)                       # (batch, k*o)
        out = torch.mul(x, y)                   # (batch, k*o)
        out = out.view(-1, 1, self.options.pool_o, self.options.pool_k)  # (batch, 1, o, k)
        out = torch.squeeze(torch.sum(out, 3))  # sum-pool over k -> (batch, o)
        # Signed square root: sqrt(|out|) with the sign of out preserved.
        out = torch.sqrt(F.relu(out)) - torch.sqrt(F.relu(-out))
        out = F.normalize(out)                  # L2-normalise each row (dim=1)
        return out


from keras.layers import Layer, Reshape, Multiply
from keras import backend as K
from keras import initializers
from keras.activations import relu


class keras_fblp(Layer):
    """Keras counterpart of ``fblp``.

    Parameters
    ----------
    pool_k : K parameter from the paper (e.g. 5)
    pool_o : o parameter from the paper (e.g. 1000)
    x_in   : input size of x (e.g. 1024)
    y_in   : input size of y (e.g. 1024)
    """

    def __init__(self, pool_k, pool_o, x_in, y_in):
        self.reshape = Reshape((pool_o, pool_k))
        self.ewmultiply = Multiply()
        self.pool_k = pool_k
        self.pool_o = pool_o
        # BUG FIX: the layer emits one value per `o` factor after sum-pooling
        # over k, so the output width is pool_o (was incorrectly pool_k, which
        # made compute_output_shape report e.g. 5 instead of 1000).
        self.output_dim = pool_o
        self.x_in = x_in
        self.y_in = y_in
        super(keras_fblp, self).__init__()

    def build(self, input_shape):
        # Weights for the two fully connected projections (no bias, matching
        # the plain K.dot used in call()).
        self.x_weights = self.add_weight(
            name='x_weight',
            shape=(self.x_in, self.pool_k * self.pool_o),
            initializer='uniform',
            trainable=True,
        )
        self.y_weights = self.add_weight(
            name='y_weight',
            shape=(self.y_in, self.pool_k * self.pool_o),
            initializer='uniform',
            trainable=True,
        )
        super(keras_fblp, self).build(input_shape)

    def call(self, inputs):
        x, y = inputs
        x = self.xdot(x) if False else K.dot(x, self.x_weights)  # (batch, k*o)
        y = K.dot(y, self.y_weights)                             # (batch, k*o)
        out = self.ewmultiply([x, y])            # (batch, k*o)
        # NOTE: the reshape yields (batch, pool_o, pool_k) — the original
        # comment had the axes swapped, though axis=2 below was already
        # correct (it sums over k).
        out = self.reshape(out)                  # (batch, pool_o, pool_k)
        out = K.sum(out, axis=2)                 # sum-pool over k -> (batch, pool_o)
        # Signed square root, then L2 normalisation.
        out = K.sqrt(relu(out)) - K.sqrt(relu(-out))
        out = K.l2_normalize(out)                # (batch, pool_o)
        return out

    def compute_output_shape(self, input_shape):
        return [input_shape[0][0], self.output_dim]


######## Torch usage
# test = fblp(options)
# x = torch.ones(32, 2048)
# y = torch.ones(32, 2048)
# out = test(x, y)
# print(out)
########
######## Keras usage
# test = keras_fblp(5, 1000, 2048, 2048)
# x = K.ones((32, 2048))
# y = K.ones((32, 2048))
# out = test([x, y])
# print(out)
########