#!/usr/bin/env python3
"""
PE-LTR demo.

Example code for the paper "A Pareto-Efficient Algorithm for Multiple
Objective Optimization in E-Commerce Recommendation".

The losses and inputs can be modified; the core part is the Pareto step.
The example illustrates the case of zero constraints on the objective
priorities.

Created on Wed Oct 9 11:27:53 2019
@author: jackielinxiao
"""

import numpy as np


def pareto_step(weights_list, out_gradients_list):
    """Compute Pareto-efficient objective weights for one update step.

    Solves the KKT system of

        min_w || sum_i w_i * g_i ||^2   s.t.  sum_i w_i = 1

    where g_i are the per-objective gradients, then projects the solution
    onto the probability simplex so every weight is non-negative.

    Args:
        weights_list: current weights, shape (1, k). Kept for interface
            compatibility with existing callers; not read by the solver.
        out_gradients_list: per-objective gradients stacked row-wise,
            shape (k, d).

    Returns:
        ndarray of shape (1, k): new non-negative weights summing to 1.
    """
    grads = np.asarray(out_gradients_list, dtype=np.float64)
    k = grads.shape[0]

    # Bordered (KKT) system: [[G G^T, 1], [1^T, 0]] @ [w; lam] = [0; 1].
    kkt = np.zeros((k + 1, k + 1))
    kkt[:k, :k] = grads @ grads.T
    kkt[:k, k] = 1.0
    kkt[k, :k] = 1.0
    rhs = np.zeros(k + 1)
    rhs[k] = 1.0

    # Min-norm least-squares solve: identical to the original
    # M (M M^T)^-1 rhs when the system is invertible, but does not crash
    # when the Gram matrix is rank-deficient (colinear gradients).
    sol = np.linalg.lstsq(kkt, rhs, rcond=None)[0]

    # Only the first k entries are objective weights; the last entry is the
    # Lagrange multiplier of the sum-to-one constraint.  (The original code
    # projected the multiplier together with the weights, so the returned
    # weights did not sum to 1.)
    raw_w = sol[:k]

    # Euclidean projection of the negatives-clipped weights onto the simplex.
    clipped = np.where(raw_w > 0.0, raw_w, 0.0)
    u = np.sort(clipped)[::-1]           # descending
    css = np.cumsum(u)
    ranks = np.arange(1, k + 1)
    support = np.nonzero(u > (css - 1.0) / ranks)[0]
    r = support[-1]                      # non-empty: u[0] > u[0] - 1 always
    theta = max(0.0, (css[r] - 1.0) / (r + 1.0))
    projected = np.where(clipped - theta > 0.0, clipped - theta, 0.0)
    return projected.reshape(1, -1)


def main():
    """Train a toy two-objective linear model with PE-LTR weight updates."""
    # TensorFlow 1.x graph-mode API; imported locally so pareto_step stays
    # importable without TensorFlow installed.
    import tensorflow as tf

    # Synthetic regression data: y = [0.1, 0.2] @ x + 0.3.
    x_data = np.float32(np.random.rand(2, 100))
    y_data = np.dot([0.100, 0.200], x_data) + 0.300

    # Per-objective weights, fed in each step after the Pareto update.
    weight_a = tf.placeholder(tf.float32)
    weight_b = tf.placeholder(tf.float32)

    b = tf.Variable(tf.zeros([1]))
    W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
    y = tf.matmul(W, x_data) + b

    loss_a = tf.reduce_mean(tf.square(y - y_data))  # fit objective
    loss_b = tf.reduce_mean(tf.square(W))           # regularization objective
    loss = weight_a * loss_a + weight_b * loss_b

    optimizer = tf.train.GradientDescentOptimizer(0.5)
    a_gradients = tf.gradients(loss_a, W)
    b_gradients = tf.gradients(loss_b, W)
    train = optimizer.minimize(loss)

    sess = tf.Session()
    # global_variables_initializer replaces the long-removed
    # initialize_all_variables.
    sess.run(tf.global_variables_initializer())

    w_a, w_b = 0.5, 0.5
    for step in range(50):
        grad_a, grad_b, _ = sess.run(
            [a_gradients, b_gradients, train],
            feed_dict={weight_a: w_a, weight_b: w_b},
        )
        weights = np.array([[w_a, w_b]])
        grads = np.vstack((grad_a[0], grad_b[0]))
        new_weights = pareto_step(weights, grads)
        w_a, w_b = new_weights[0, 0], new_weights[0, 1]
        print(w_a, w_b, step, sess.run(W), sess.run(b))


if __name__ == "__main__":
    main()