├── LICENSE
├── README.md
├── bpr.py
├── bpr.scala
├── distbpr.py
├── evaluation.py
└── runmf.py
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2016 Alfredo Láinez

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# bpr-spark

Bayesian Personalized Ranking (BPR) for implicit-feedback matrix factorization on Spark.

* `bpr.py` / `bpr.scala`: BPR-SGD where the factor matrices are averaged on the driver each iteration (the Scala file also contains a user-distributed variant and an ALS baseline)
* `distbpr.py`: BPR with the user matrix distributed across the workers
* `evaluation.py`: preprocessing of the Kaggle song triplets and AUC evaluation
* `runmf.py`: PySpark driver script

[Paper with the architecture explained](https://stanford.edu/~rezab/classes/cme323/S16/projects_reports/rodrigo_oliveira.pdf)
--------------------------------------------------------------------------------
/bpr.py:
--------------------------------------------------------------------------------
import random

import numpy as np
from tqdm import tqdm


def _gradient_single_point(user_id, prod_id, prod_id_neg,
                           user_mat, prod_mat, lambda_reg, alpha):
    """One BPR-SGD step for the triple (user, positive item, negative item)."""

    x_uij = user_mat[user_id].dot(prod_mat[prod_id]) - \
        user_mat[user_id].dot(prod_mat[prod_id_neg])

    # sigma(-x_uij), scaled by the learning rate
    step_size = np.exp(-x_uij) / (1 + np.exp(-x_uij)) * alpha

    # Snapshot the user vector so all three updates see the same values;
    # the L2 term acts as weight decay and is therefore subtracted.
    user_vec = user_mat[user_id].copy()

    user_mat[user_id] += step_size * \
        (prod_mat[prod_id] - prod_mat[prod_id_neg]) - \
        alpha * lambda_reg * user_vec

    prod_mat[prod_id] += step_size * user_vec - \
        alpha * lambda_reg * prod_mat[prod_id]

    prod_mat[prod_id_neg] -= step_size * user_vec + \
        alpha * lambda_reg * prod_mat[prod_id_neg]


def _sample_optimize_partition(ratings, user_mat, prod_mat, num_prods,
                               lambda_reg=0.001, alpha=0.1, position=None):

    ratings = list(ratings)
    # Guard against partitions holding fewer than 20000 triples
    sampled_ratings = random.sample(ratings, min(len(ratings), 20000))

    for u, i, j in tqdm(sampled_ratings, position=position, leave=False):
        _gradient_single_point(u, i, j, user_mat, prod_mat, lambda_reg, alpha)

    yield user_mat, prod_mat


def optimizeMF(ratings, rank, num_iter=10, num_neg_samples=30):
    """Provides a Spark-facing, non-distributed version of BPR.

    The factor matrices live on the driver and reach each task through the
    closure; every partition runs SGD on a sample of triples and the
    per-partition results are averaged back on the driver.

    Args:
    -----
    ratings: an RDD of (user, item) pairs
    rank: latent factor dimension
    num_iter: number of iterations
    num_neg_samples: how many negative samples to draw per positive pair

    Returns:
    --------
    (user_mat, prod_mat)
    """

    ratings_partitioned = ratings.partitionBy(4).persist()

    num_users = ratings_partitioned.map(lambda x: x[0]).max()
    num_prods = ratings_partitioned.map(lambda x: x[1]).max()

    user_mat = np.random.uniform(size=(num_users + 1, rank))
    prod_mat = np.random.uniform(size=(num_prods + 1, rank))

    for _ in range(num_iter):

        result = ratings_partitioned.flatMap(
            # repeat each positive pair once per negative sample
            lambda x: [x] * num_neg_samples
        ).map(
            # attach a uniformly drawn negative item id in [1, num_prods]
            lambda x: x[:2] + (np.random.randint(num_prods) + 1, )
        ).mapPartitionsWithIndex(
            lambda ix, triples: _sample_optimize_partition(
                triples, user_mat, prod_mat, num_prods, position=ix
            )
        ).persist()

        num = float(result.count())

        user_mat, prod_mat = result.reduce(
            lambda a, b: (a[0] + b[0], a[1] + b[1]))

        user_mat /= num
        prod_mat /= num

    return (user_mat, prod_mat)
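

# A minimal usage sketch (hypothetical data; assumes a SparkContext `sc`, e.g.
# the one built in runmf.py):
#
#   pairs = sc.parallelize([(1, 1), (1, 2), (2, 2), (2, 3)])
#   user_mat, prod_mat = optimizeMF(pairs, rank=10, num_iter=2)
#   score = user_mat[1].dot(prod_mat[2])  # higher means stronger preference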
--------------------------------------------------------------------------------
/bpr.scala:
--------------------------------------------------------------------------------
import java.io.File
import scala.util.Random

import org.apache.spark.{RangePartitioner, SparkConf, SparkContext}
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.rdd.RDD

import breeze.linalg.{DenseMatrix, DenseVector}

object MainBPR {

  def main(args: Array[String]) {

    /**
      * Bayesian Personalized Ranking
      * This object implements BPR with matrix factorization where neither the user nor the item
      * matrix is distributed. That is: SGD runs in a distributed fashion, but both matrices travel
      * to the workers through the closure and are averaged back in the driver on every iteration.
      *
      * For a user-distributed version, see below.
      */
    object BPR extends Serializable {

      private def gradientSinglePoint(userId: Int, prodPos: Int, prodNeg: Int,
                                      userMat: DenseMatrix[Double], prodMat: DenseMatrix[Double],
                                      lambdaReg: Double = 0.01, alpha: Double = 0.1): Unit = {

        val x_uij = userMat(userId, ::).dot(prodMat(prodPos, ::)) - userMat(userId, ::).dot(prodMat(prodNeg, ::))

        // sigma(-x_uij); the L2 terms act as weight decay and are subtracted
        val scale = math.exp(-x_uij) / (1 + math.exp(-x_uij))
        prodMat(prodPos, ::) :+= ((userMat(userId, ::) :* scale) - (prodMat(prodPos, ::) :* lambdaReg)) :* alpha
        prodMat(prodNeg, ::) :+= ((-userMat(userId, ::) :* scale) - (prodMat(prodNeg, ::) :* lambdaReg)) :* alpha
        userMat(userId, ::) :+= (((prodMat(prodPos, ::) - prodMat(prodNeg, ::)) :* scale) -
          (userMat(userId, ::) :* lambdaReg)) :* alpha
      }

      private def sampleAndOptimizePartition(ratings: Iterator[(Int, Int)], userMat: DenseMatrix[Double],
                                             prodMat: DenseMatrix[Double], numProds: Int, numSamples: Int = 20000,
                                             lambdaReg: Double = 0.01, alpha: Double = 0.1): Iterator[(DenseMatrix[Double], DenseMatrix[Double])] = {

        val NUM_OF_NEGATIVE_PER_IMPLICIT = 5

        // Repeat each positive pair, attach a uniformly drawn negative item in [1, numProds],
        // then shuffle and keep at most numSamples triples
        val positiveRatingsRepeated = ratings.flatMap(x => Vector.fill(NUM_OF_NEGATIVE_PER_IMPLICIT)(x)).toVector
        val negativeRatings = positiveRatingsRepeated.map(x => (x._1, x._2, Random.nextInt(numProds) + 1))

        val sampledRatings = Random.shuffle(negativeRatings).take(numSamples)

        for (sampledPoint <- sampledRatings) {
          gradientSinglePoint(sampledPoint._1, sampledPoint._2, sampledPoint._3, userMat, prodMat, lambdaReg, alpha)
        }

        Iterator((userMat, prodMat))
      }

      def optimizeMF(ratings: RDD[(Int, Int)], rank: Int = 10,
                     numIterations: Int = 10, numPartitions: Int = 4): (DenseMatrix[Double], DenseMatrix[Double]) = {

        // Partition by user
        val userPartitioner = new RangePartitioner(numPartitions, ratings)
        val ratingsPartitioned = ratings.partitionBy(userPartitioner).persist()

        val numUsers = ratingsPartitioned.map(x => x._1).max()
        val numProds = ratingsPartitioned.map(x => x._2).max()

        var userMat: DenseMatrix[Double] = DenseMatrix.rand[Double](numUsers + 1, rank)
        var prodMat: DenseMatrix[Double] = DenseMatrix.rand[Double](numProds + 1, rank)

        for (_ <- 1 to numIterations) {
          val result = ratingsPartitioned.mapPartitions {
            ratings => sampleAndOptimizePartition(ratings, userMat, prodMat, numProds)
          }

          // Average the per-partition parameter copies on the driver
          val numPartitionResults = result.count.toDouble
          val summedMatrices = result.reduce((a, b) => (a._1 + b._1, a._2 + b._2))

          userMat = summedMatrices._1 :/ numPartitionResults
          prodMat = summedMatrices._2 :/ numPartitionResults
        }

        (userMat, prodMat)
      }

    }
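
    // A minimal usage sketch (hypothetical data; assumes the SparkContext `sc` created below):
    //
    //   val pairs = sc.parallelize(Seq((1, 1), (1, 2), (2, 2), (2, 3)))
    //   val (u, p) = BPR.optimizeMF(pairs, rank = 10, numIterations = 2)
    //   val score = u(1, ::).dot(p(2, ::))  // predicted preference of user 1 for item 2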

    /**
      * Bayesian Personalized Ranking with distributed user matrix
      * This object implements BPR with matrix factorization where the user matrix is distributed
      * across the workers; only the item matrix is averaged in the driver.
      *
      * NOTE: CURRENTLY THERE IS A BUG AND THIS VERSION IS NOT WORKING. LOOK AT THE PYTHON VERSION
      * IN THIS REPOSITORY FOR A WORKING VERSION
      */
    object DistributedUserBPR extends Serializable {

      private def sampleAndOptimizePartition(userRatingsFeatures: Iterator[(Int, (Iterable[Int], DenseVector[Double]))],
                                             prodMat: DenseMatrix[Double], numProds: Int,
                                             numSamples: Int = 50000, lambdaReg: Double = 0.1,
                                             alpha: Double = 0.01): Iterator[(DenseMatrix[Double], Array[(Int, DenseVector[Double])])] = {

        val NUM_OF_NEGATIVE_PER_IMPLICIT = 30

        // Materialize the iterator first: it is traversed twice below, and an
        // Iterator can only be consumed once
        val userRatingsList = userRatingsFeatures.toVector

        val ratings = userRatingsList.flatMap {
          case (userId, (products, _)) => products.map(prod => (userId, prod))
        }

        val userVectors = scala.collection.mutable.Map[Int, DenseVector[Double]]()
        for (user <- userRatingsList) {
          userVectors(user._1) = user._2._2
        }

        val positiveRatingsRepeated = ratings.flatMap(x => Vector.fill(NUM_OF_NEGATIVE_PER_IMPLICIT)(x))
        val negativeRatings = positiveRatingsRepeated.map(x => (x._1, x._2, Random.nextInt(numProds) + 1))

        val sampledRatings = Random.shuffle(negativeRatings).take(numSamples)

        // SGD over the sampled (user, positive item, negative item) triples
        for (sampledPoint <- sampledRatings) {
          val userId = sampledPoint._1
          val prodPos = sampledPoint._2
          val prodNeg = sampledPoint._3
          val userVector = userVectors(userId)

          val x_uij = userVector.dot(prodMat(prodPos, ::).t) - userVector.dot(prodMat(prodNeg, ::).t)
          // sigma(-x_uij); the L2 terms act as weight decay and are subtracted
          val scale = math.exp(-x_uij) / (1 + math.exp(-x_uij))

          prodMat(prodPos, ::) :+= ((userVector :* scale).t - (prodMat(prodPos, ::) :* lambdaReg)) :* alpha
          prodMat(prodNeg, ::) :+= ((-userVector :* scale).t - (prodMat(prodNeg, ::) :* lambdaReg)) :* alpha

          val newUserVector = userVector + (((prodMat(prodPos, ::) - prodMat(prodNeg, ::)) :* scale).t -
            (userVector :* lambdaReg)) :* alpha
          userVectors(userId) = newUserVector
        }

        Iterator((prodMat, userVectors.toArray))
      }

      def optimizeMF(ratings: RDD[(Int, Int)], rank: Int = 10,
                     numIterations: Int = 10, numPartitions: Int = 4): (DenseMatrix[Double], DenseMatrix[Double]) = {

        val numProds = ratings.map(x => x._2).max()

        // Partition by user: also attach a randomly initialized feature vector to each user
        val ratingsByUser = ratings.groupByKey().persist()
        val userRatingsFeatures = ratingsByUser.map {
          case (userId, products) => (userId, (products, DenseVector.rand[Double](rank)))
        }

        // TODO: in the distributed version partitioning is not important anymore (all products go with the user)
        val userPartitioner = new RangePartitioner(numPartitions, userRatingsFeatures)
        var ratingsPartitioned = userRatingsFeatures.partitionBy(userPartitioner).persist()

        var prodMat: DenseMatrix[Double] = DenseMatrix.rand[Double](numProds + 1, rank)

        for (_ <- 1 to numIterations) {
          val result = ratingsPartitioned.mapPartitions {
            ratings => sampleAndOptimizePartition(ratings, prodMat, numProds)
          }

          // Average the item matrix on the driver; the user vectors stay distributed
          prodMat = result.map(x => x._1).reduce((a, b) => a + b) :/ result.count.toDouble

          val userVectorsRDD = result.flatMap(x => x._2)
          ratingsPartitioned = ratingsByUser.join(userVectorsRDD).cache()
        }

        // Collect the user vectors into a dense matrix, only for evaluation purposes
        val numUsers = ratings.map(x => x._1).max()

        val userMat: DenseMatrix[Double] = DenseMatrix.rand[Double](numUsers + 1, rank)
        val userVectors = ratingsPartitioned.collect().map { case (userId, (products, vector)) => (userId, vector) } // try without collect
        for (user <- userVectors) {
          userMat(user._1, ::) := user._2.t
        }

        (userMat, prodMat)
      }

    }
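
    // Data layout in the user-distributed version: each partition element is
    //
    //   (userId, (Iterable(prodId1, prodId2, ...), userFactorVector))
    //
    // so a user's ratings travel together with that user's latent vector, while
    // prodMat is shipped whole through the closure and averaged in the driver.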

    val conf = new SparkConf().setAppName("BPR").setMaster("local")
    val sc = new SparkContext(conf)

    val ratingsBPR = sc.textFile("/home/alfredo/Desktop/bpr/training_ratings.txt")
      .map(line => line.split(" "))
      .map(x => (x(0).toInt, x(1).toInt))
    //val (userMat, prodMat) = BPR.optimizeMF(ratingsBPR, 10, 10)
    val (userMat, prodMat) = DistributedUserBPR.optimizeMF(ratingsBPR, 10, 20)

    breeze.linalg.csvwrite(new File("/home/alfredo/Desktop/bpr/userMatrix.txt"), userMat, separator = ' ')
    breeze.linalg.csvwrite(new File("/home/alfredo/Desktop/bpr/prodMatrix.txt"), prodMat, separator = ' ')


    // WITH ALS
    val ratings = sc.textFile("/home/alfredo/Desktop/bpr/training_ratings.txt")
      .map(line => line.split(" "))
      .map(x => (x(0).toInt, x(1).toInt, 1))

    val rank = 10
    val numIterations = 10
    val ALSRatings = ratings.map { case (user, item, num) => Rating(user, item, num.toDouble) }
    val model = ALS.trainImplicit(ALSRatings, rank, numIterations, 0.01, 0.01)

    model.productFeatures.saveAsTextFile("/home/alfredo/Desktop/bpr/als_product_matrix.txt")
    model.userFeatures.saveAsTextFile("/home/alfredo/Desktop/bpr/als_user_matrix.txt")

    // Evaluate the model on the rating data
    val usersProducts = ALSRatings.map { case Rating(user, product, rate) =>
      (user, product)
    }
    val predictions =
      model.predict(usersProducts).map { case Rating(user, product, rate) =>
        ((user, product), rate)
      }
    val ratesAndPreds = ALSRatings.map { case Rating(user, product, rate) =>
      ((user, product), rate)
    }.join(predictions)
    val MSE = ratesAndPreds.map { case ((user, product), (r1, r2)) =>
      val err = r1 - r2
      err * err
    }.mean()
    println("Mean Squared Error = " + MSE)

  }
}
--------------------------------------------------------------------------------
/distbpr.py:
--------------------------------------------------------------------------------
import random

import numpy as np
from tqdm import tqdm


def _optimize_partition(user_ratings, prod_mat, nb_prods, l2_reg=0.001,
                        alpha=0.1, negative_samples=30, num_samples=20000):

    # Materialize the iterator: it is traversed twice below
    user_ratings = list(user_ratings)

    ratings = (
        (u_id, prod)
        for (u_id, (products, _)) in user_ratings
        for prod in products
    )

    user_vectors = {u: v for (u, (_, v)) in user_ratings}

    # Repeat each positive pair once per negative sample...
    pos_repeated = (
        x
        for x in ratings
        for _ in range(negative_samples)
    )

    # ...and attach a uniformly drawn negative item id in [1, nb_prods]
    neg_ratings = [
        (u, pos, np.random.randint(nb_prods) + 1)
        for (u, pos) in pos_repeated
    ]

    random.shuffle(neg_ratings)

    for u_id, pos_id, neg_id in tqdm(neg_ratings[:num_samples]):

        u_vector = user_vectors[u_id]
        x_uij = u_vector.dot(prod_mat[pos_id]) - u_vector.dot(prod_mat[neg_id])

        # sigma(-x_uij); the L2 terms act as weight decay and are subtracted
        scale = np.exp(-x_uij) / (1 + np.exp(-x_uij))

        prod_mat[pos_id] += alpha * (
            scale * u_vector - l2_reg * prod_mat[pos_id])

        prod_mat[neg_id] += alpha * (
            -scale * u_vector - l2_reg * prod_mat[neg_id])

        user_vectors[u_id] = u_vector + alpha * (
            scale * (prod_mat[pos_id] - prod_mat[neg_id]) - l2_reg * u_vector)

    # A list (not a dict view) so the result pickles cleanly
    yield (prod_mat, list(user_vectors.items()))
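

# Each element reaching _optimize_partition looks like (the layout produced by
# optimizeMF below):
#
#   (user_id, ([item_1, item_2, ...], user_vector))
#
# so a partition carries every rating of its users together with their current
# latent vectors, while prod_mat is shipped whole through the closure.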


def optimizeMF(ratings, rank=10, nb_iter=10, nb_partitions=4,
               num_samples=100000, l2_reg=0.001, alpha=0.1,
               negative_samples=30):
    """Optimize BPR for matrix factorization with a distributed user matrix.

    Args:
    -----
    ratings: RDD of (user, item) implicit interactions
    rank: latent factor dimension
    nb_iter: how many iterations of SGD
    nb_partitions: how many user partitions to distribute
    num_samples: |D_s| from the paper
    negative_samples: how many negative samples per positive example
    l2_reg: regularization parameter
    alpha: learning rate

    Returns:
    --------
    (user_mat, prod_mat)
    """

    nb_prods = ratings.map(lambda x: x[1]).max()
    ratings_by_user = ratings.groupByKey().persist()

    def make_vec(pair):
        # Attach a randomly initialized feature vector to each user
        u_id, products = pair
        return (u_id, (products, np.random.uniform(size=rank)))

    user_ratings = ratings_by_user.map(make_vec).persist()
    ratings_partitioned = user_ratings.partitionBy(nb_partitions).persist()
    prod_mat = np.random.uniform(size=(nb_prods + 1, rank))

    for _ in range(nb_iter):
        result = ratings_partitioned.mapPartitions(
            # sample and apply the gradient
            lambda it: _optimize_partition(
                user_ratings=it,
                prod_mat=prod_mat,
                nb_prods=nb_prods,
                num_samples=num_samples,
                l2_reg=l2_reg,
                alpha=alpha,
                negative_samples=negative_samples
            )
        ).persist()  # cached: reused by reduce(), count() and the join below

        prod_mat = result.map(
            # take the per-partition product matrices
            lambda x: x[0]
        ).reduce(
            # sum them as matrices...
            lambda x, y: x + y
        ) / result.count()  # ...and average

        # Join the updated user vectors back onto the grouped ratings
        user_vecs_rdd = result.map(lambda x: x[1]).flatMap(lambda x: x)
        ratings_partitioned = ratings_by_user.join(user_vecs_rdd)

        # release the cached per-iteration result
        result.unpersist()

    # Collect the user vectors into a matrix, only for evaluation purposes
    nb_users = ratings.map(lambda x: x[0]).max()

    user_mat = np.random.uniform(size=(nb_users + 1, rank))

    for u_id, (products, vector) in ratings_partitioned.toLocalIterator():
        user_mat[u_id] = vector

    return (user_mat, prod_mat)
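

# A minimal usage sketch (hypothetical data; assumes a SparkContext `sc`):
#
#   pairs = sc.parallelize([(1, 1), (1, 2), (2, 2), (2, 3)])
#   user_mat, prod_mat = optimizeMF(pairs, rank=10, nb_iter=2, num_samples=100)
#   score = user_mat[1].dot(prod_mat[2])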
--------------------------------------------------------------------------------
/evaluation.py:
--------------------------------------------------------------------------------
import numpy as np

# Read ratings

users = {}
i = 0
with open("/home/alfredo/Desktop/bpr/kaggle_users.txt") as f:
    for line in f:
        users[line.strip()] = i
        i += 1

songs = {}
with open("/home/alfredo/Desktop/bpr/kaggle_songs.txt") as f:
    for line in f:
        song, song_id = line.strip().split(" ")
        songs[song] = int(song_id)

ratings = []
with open("/home/alfredo/Desktop/bpr/kaggle_visible_evaluation_triplets.txt") as f:
    for line in f:
        user, song, _ = line.strip().split("\t")
        ratings.append((users[user], songs[song]))

# Keep only users with at least 10 songs and songs with at least 10 users
songs_by_user = {}
users_by_song = {}
for user, song in ratings:
    songs_by_user.setdefault(user, []).append(song)
    users_by_song.setdefault(song, []).append(user)

surviving_users = {user for user in songs_by_user
                   if len(songs_by_user[user]) >= 10}
surviving_songs = {song for song in users_by_song
                   if len(users_by_song[song]) >= 10}

final_ratings = [(user, song) for (user, song) in ratings
                 if user in surviving_users and song in surviving_songs]

final_ratings_by_user = {}
for user, song in final_ratings:
    final_ratings_by_user.setdefault(user, []).append(song)

train_ratings = []
test_ratings = []
test_rating_by_user = {}
for user in final_ratings_by_user:
    # First element goes to the test set, the rest to training
    test_ratings.append((user, final_ratings_by_user[user][0]))
    test_rating_by_user[user] = final_ratings_by_user[user][0]

    for song in final_ratings_by_user[user][1:]:
        train_ratings.append((user, song))

# Write the training pairs to a file
with open("/home/alfredo/Desktop/bpr/training_ratings.txt", "w") as f:
    for user, song in train_ratings:
        f.write("%d %d\n" % (user, song))


############################
### EVALUATION
############################
# Read model matrices
userMat = np.loadtxt("/home/alfredo/Desktop/bpr/userMatrix.txt")
prodMat = np.loadtxt("/home/alfredo/Desktop/bpr/prodMatrix.txt")

all_songs = {song for (_, song) in final_ratings}

# Compute AUC over a sample of users (scoring every user takes too long)
total_auc = 0
total_users = 0
sample_users = list(final_ratings_by_user)[:1000]
for user in sample_users:
    # Only one test rating per user, as in the BPR paper
    song_test = test_rating_by_user[user]
    rated = set(final_ratings_by_user[user])
    # User ids index the matrix rows directly, matching the training scripts
    test_score = userMat[user].dot(prodMat[song_test])
    num_ratings = 0
    auc_user = 0
    for song in all_songs:
        if song not in rated:
            other_score = userMat[user].dot(prodMat[song])

            # We want the test song's score to be the higher one
            if test_score > other_score:
                auc_user += 1
            num_ratings += 1

    auc_user = auc_user * 1.0 / num_ratings
    total_auc += auc_user
    total_users += 1
    print("User: %d, AUC: %f" % (total_users, auc_user))

total_auc = total_auc / len(sample_users)
print("Total AUC is %f" % total_auc)
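

# A vectorized variant of the per-user AUC above (a sketch; assumes the same
# userMat/prodMat and the id-as-row-index convention used in this script):
def auc_for_user_vectorized(user, song_test, rated_songs, candidate_songs):
    candidates = np.array([s for s in candidate_songs if s not in rated_songs])
    scores = prodMat[candidates].dot(userMat[user])  # score all candidates at once
    test_score = userMat[user].dot(prodMat[song_test])
    return np.mean(test_score > scores)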
--------------------------------------------------------------------------------
/runmf.py:
--------------------------------------------------------------------------------
from pyspark import SparkConf, SparkContext

from bpr import optimizeMF

conf = (SparkConf().setMaster("local")
        .setAppName("BPR")
        .set("spark.executor.memory", "10g"))

sc = SparkContext(conf=conf)


if __name__ == '__main__':

    PREFIX = './'

    ratings = sc.textFile(
        "%straining_ratings.txt" % PREFIX
    ).map(
        lambda line: line.split(" ")
    ).map(
        # tuples, not lists: partitionBy in bpr.py expects key/value pairs
        lambda x: (int(x[0]), int(x[1]))
    )

    userMat, prodMat = optimizeMF(ratings, 10, 10)
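
# To launch locally (assuming a Spark installation, with training_ratings.txt
# from evaluation.py sitting next to this script):
#
#   spark-submit runmf.py
--------------------------------------------------------------------------------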