├── Apriori.png
├── Apriori_and_ECLAT.ipynb
├── ECLAT Pair.png
├── Market_Basket_Optimisation.csv
├── README.md
└── apyori.py
/Apriori.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amyoshino/Recommendation-System-with-Apriori-and-ECLAT/64b1358c26fdd9461bf7344ea095c873d1e9381a/Apriori.png
--------------------------------------------------------------------------------
/Apriori_and_ECLAT.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Association Rule - Apriori and ECLAT \n",
8 | "\n",
9 | "Training association rule models (Apriori and ECLAT) to find the most related items bought by customers of a french supermarket during a week. All 7501 lines of the dataset represent items bought by an unique customer, during this week.\n",
10 | "\n",
11 | "This algorithm associate products preferences by most of the customers and can be used to generate products recommendation and help on displaying products strategy."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 1,
17 | "metadata": {
18 | "collapsed": true
19 | },
20 | "outputs": [],
21 | "source": [
22 | "# Importing the libraries\n",
23 | "import numpy as np\n",
24 | "import matplotlib.pyplot as plt\n",
25 | "import pandas as pd"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 2,
31 | "metadata": {
32 | "collapsed": true
33 | },
34 | "outputs": [],
35 | "source": [
36 | "# Data Loading\n",
37 | "dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)\n",
38 | "\n",
39 | "# Adding all customers into a list of lists\n",
40 | "transactions = []\n",
41 | "for i in range(0, 7501):\n",
42 | " transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 3,
48 | "metadata": {
49 | "collapsed": false
50 | },
51 | "outputs": [
52 | {
53 | "data": {
54 | "text/html": [
55 | "
\n",
56 | "
\n",
57 | " \n",
58 | " \n",
59 | " | \n",
60 | " 0 | \n",
61 | " 1 | \n",
62 | " 2 | \n",
63 | " 3 | \n",
64 | " 4 | \n",
65 | " 5 | \n",
66 | " 6 | \n",
67 | " 7 | \n",
68 | " 8 | \n",
69 | " 9 | \n",
70 | " 10 | \n",
71 | " 11 | \n",
72 | " 12 | \n",
73 | " 13 | \n",
74 | " 14 | \n",
75 | " 15 | \n",
76 | " 16 | \n",
77 | " 17 | \n",
78 | " 18 | \n",
79 | " 19 | \n",
80 | "
\n",
81 | " \n",
82 | " \n",
83 | " \n",
84 | " 0 | \n",
85 | " shrimp | \n",
86 | " almonds | \n",
87 | " avocado | \n",
88 | " vegetables mix | \n",
89 | " green grapes | \n",
90 | " whole weat flour | \n",
91 | " yams | \n",
92 | " cottage cheese | \n",
93 | " energy drink | \n",
94 | " tomato juice | \n",
95 | " low fat yogurt | \n",
96 | " green tea | \n",
97 | " honey | \n",
98 | " salad | \n",
99 | " mineral water | \n",
100 | " salmon | \n",
101 | " antioxydant juice | \n",
102 | " frozen smoothie | \n",
103 | " spinach | \n",
104 | " olive oil | \n",
105 | "
\n",
106 | " \n",
107 | " 1 | \n",
108 | " burgers | \n",
109 | " meatballs | \n",
110 | " eggs | \n",
111 | " NaN | \n",
112 | " NaN | \n",
113 | " NaN | \n",
114 | " NaN | \n",
115 | " NaN | \n",
116 | " NaN | \n",
117 | " NaN | \n",
118 | " NaN | \n",
119 | " NaN | \n",
120 | " NaN | \n",
121 | " NaN | \n",
122 | " NaN | \n",
123 | " NaN | \n",
124 | " NaN | \n",
125 | " NaN | \n",
126 | " NaN | \n",
127 | " NaN | \n",
128 | "
\n",
129 | " \n",
130 | " 2 | \n",
131 | " chutney | \n",
132 | " NaN | \n",
133 | " NaN | \n",
134 | " NaN | \n",
135 | " NaN | \n",
136 | " NaN | \n",
137 | " NaN | \n",
138 | " NaN | \n",
139 | " NaN | \n",
140 | " NaN | \n",
141 | " NaN | \n",
142 | " NaN | \n",
143 | " NaN | \n",
144 | " NaN | \n",
145 | " NaN | \n",
146 | " NaN | \n",
147 | " NaN | \n",
148 | " NaN | \n",
149 | " NaN | \n",
150 | " NaN | \n",
151 | "
\n",
152 | " \n",
153 | " 3 | \n",
154 | " turkey | \n",
155 | " avocado | \n",
156 | " NaN | \n",
157 | " NaN | \n",
158 | " NaN | \n",
159 | " NaN | \n",
160 | " NaN | \n",
161 | " NaN | \n",
162 | " NaN | \n",
163 | " NaN | \n",
164 | " NaN | \n",
165 | " NaN | \n",
166 | " NaN | \n",
167 | " NaN | \n",
168 | " NaN | \n",
169 | " NaN | \n",
170 | " NaN | \n",
171 | " NaN | \n",
172 | " NaN | \n",
173 | " NaN | \n",
174 | "
\n",
175 | " \n",
176 | " 4 | \n",
177 | " mineral water | \n",
178 | " milk | \n",
179 | " energy bar | \n",
180 | " whole wheat rice | \n",
181 | " green tea | \n",
182 | " NaN | \n",
183 | " NaN | \n",
184 | " NaN | \n",
185 | " NaN | \n",
186 | " NaN | \n",
187 | " NaN | \n",
188 | " NaN | \n",
189 | " NaN | \n",
190 | " NaN | \n",
191 | " NaN | \n",
192 | " NaN | \n",
193 | " NaN | \n",
194 | " NaN | \n",
195 | " NaN | \n",
196 | " NaN | \n",
197 | "
\n",
198 | " \n",
199 | "
\n",
200 | "
"
201 | ],
202 | "text/plain": [
203 | " 0 1 2 3 4 \\\n",
204 | "0 shrimp almonds avocado vegetables mix green grapes \n",
205 | "1 burgers meatballs eggs NaN NaN \n",
206 | "2 chutney NaN NaN NaN NaN \n",
207 | "3 turkey avocado NaN NaN NaN \n",
208 | "4 mineral water milk energy bar whole wheat rice green tea \n",
209 | "\n",
210 | " 5 6 7 8 9 \\\n",
211 | "0 whole weat flour yams cottage cheese energy drink tomato juice \n",
212 | "1 NaN NaN NaN NaN NaN \n",
213 | "2 NaN NaN NaN NaN NaN \n",
214 | "3 NaN NaN NaN NaN NaN \n",
215 | "4 NaN NaN NaN NaN NaN \n",
216 | "\n",
217 | " 10 11 12 13 14 15 \\\n",
218 | "0 low fat yogurt green tea honey salad mineral water salmon \n",
219 | "1 NaN NaN NaN NaN NaN NaN \n",
220 | "2 NaN NaN NaN NaN NaN NaN \n",
221 | "3 NaN NaN NaN NaN NaN NaN \n",
222 | "4 NaN NaN NaN NaN NaN NaN \n",
223 | "\n",
224 | " 16 17 18 19 \n",
225 | "0 antioxydant juice frozen smoothie spinach olive oil \n",
226 | "1 NaN NaN NaN NaN \n",
227 | "2 NaN NaN NaN NaN \n",
228 | "3 NaN NaN NaN NaN \n",
229 | "4 NaN NaN NaN NaN "
230 | ]
231 | },
232 | "execution_count": 3,
233 | "metadata": {},
234 | "output_type": "execute_result"
235 | }
236 | ],
237 | "source": [
238 | "dataset.head(5)"
239 | ]
240 | },
241 | {
242 | "cell_type": "markdown",
243 | "metadata": {},
244 | "source": [
245 | "### Apriori implementation using apyori library \n",
246 | "source: https://github.com/ymoch/apyori\n",
247 | "\n",
248 | "The output of this part is to see which are the products that used to be more bought in combination compared to other combinations using apriori algorithm.\n",
249 | "\n",
250 | "This code is a based on a lecture from the course: Machine Learning A-Z™ by Kirill Eremenko https://www.udemy.com/machinelearning/learn/v4/overview. I put some transformations to fit on dataframes and to make the visualization easier."
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": 4,
256 | "metadata": {
257 | "collapsed": false
258 | },
259 | "outputs": [
260 | {
261 | "data": {
262 | "text/plain": [
263 | "[['shrimp',\n",
264 | " 'almonds',\n",
265 | " 'avocado',\n",
266 | " 'vegetables mix',\n",
267 | " 'green grapes',\n",
268 | " 'whole weat flour',\n",
269 | " 'yams',\n",
270 | " 'cottage cheese',\n",
271 | " 'energy drink',\n",
272 | " 'tomato juice',\n",
273 | " 'low fat yogurt',\n",
274 | " 'green tea',\n",
275 | " 'honey',\n",
276 | " 'salad',\n",
277 | " 'mineral water',\n",
278 | " 'salmon',\n",
279 | " 'antioxydant juice',\n",
280 | " 'frozen smoothie',\n",
281 | " 'spinach',\n",
282 | " 'olive oil'],\n",
283 | " ['burgers',\n",
284 | " 'meatballs',\n",
285 | " 'eggs',\n",
286 | " 'nan',\n",
287 | " 'nan',\n",
288 | " 'nan',\n",
289 | " 'nan',\n",
290 | " 'nan',\n",
291 | " 'nan',\n",
292 | " 'nan',\n",
293 | " 'nan',\n",
294 | " 'nan',\n",
295 | " 'nan',\n",
296 | " 'nan',\n",
297 | " 'nan',\n",
298 | " 'nan',\n",
299 | " 'nan',\n",
300 | " 'nan',\n",
301 | " 'nan',\n",
302 | " 'nan']]"
303 | ]
304 | },
305 | "execution_count": 4,
306 | "metadata": {},
307 | "output_type": "execute_result"
308 | }
309 | ],
310 | "source": [
311 | "# Inspecting elements\n",
312 | "transactions[:2]"
313 | ]
314 | },
315 | {
316 | "cell_type": "code",
317 | "execution_count": 5,
318 | "metadata": {
319 | "collapsed": true
320 | },
321 | "outputs": [],
322 | "source": [
323 | "# Training Apriori on the dataset\n",
324 | "# The hyperparameters choosen on this training are:\n",
325 | "# min_support = items bought more than 3 times a day * 7 days (week) / 7500 customers = 0.0028\n",
326 | "# min_confidence: at least 20%, min_lift = minimum of 3 (less than that is too low)\n",
327 | "\n",
328 | "from apyori import apriori\n",
329 | "rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": 6,
335 | "metadata": {
336 | "collapsed": true
337 | },
338 | "outputs": [],
339 | "source": [
340 | "# Visualising the results\n",
341 | "results = list(rules)"
342 | ]
343 | },
344 | {
345 | "cell_type": "code",
346 | "execution_count": 7,
347 | "metadata": {
348 | "collapsed": false
349 | },
350 | "outputs": [],
351 | "source": [
352 | "lift = []\n",
353 | "association = []\n",
354 | "for i in range (0, len(results)):\n",
355 | " lift.append(results[:len(results)][i][2][0][3])\n",
356 | " association.append(list(results[:len(results)][i][0]))"
357 | ]
358 | },
359 | {
360 | "cell_type": "markdown",
361 | "metadata": {},
362 | "source": [
363 | "### Visualizing results in a dataframe"
364 | ]
365 | },
366 | {
367 | "cell_type": "code",
368 | "execution_count": 8,
369 | "metadata": {
370 | "collapsed": false
371 | },
372 | "outputs": [],
373 | "source": [
374 | "rank = pd.DataFrame([association, lift]).T\n",
375 | "rank.columns = ['Association', 'Lift']"
376 | ]
377 | },
378 | {
379 | "cell_type": "code",
380 | "execution_count": 9,
381 | "metadata": {
382 | "collapsed": false
383 | },
384 | "outputs": [
385 | {
386 | "data": {
387 | "text/html": [
388 | "\n",
389 | "
\n",
390 | " \n",
391 | " \n",
392 | " | \n",
393 | " Association | \n",
394 | " Lift | \n",
395 | "
\n",
396 | " \n",
397 | " \n",
398 | " \n",
399 | " 128 | \n",
400 | " [olive oil, whole wheat pasta, mineral water, ... | \n",
401 | " 6.11586 | \n",
402 | "
\n",
403 | " \n",
404 | " 58 | \n",
405 | " [olive oil, whole wheat pasta, mineral water] | \n",
406 | " 6.11586 | \n",
407 | "
\n",
408 | " \n",
409 | " 96 | \n",
410 | " [soup, mineral water, frozen vegetables, milk] | \n",
411 | " 5.48441 | \n",
412 | "
\n",
413 | " \n",
414 | " 146 | \n",
415 | " [soup, mineral water, frozen vegetables, nan, ... | \n",
416 | " 5.48441 | \n",
417 | "
\n",
418 | " \n",
419 | " 28 | \n",
420 | " [honey, fromage blanc, nan] | \n",
421 | " 5.16427 | \n",
422 | "
\n",
423 | " \n",
424 | " 3 | \n",
425 | " [honey, fromage blanc] | \n",
426 | " 5.16427 | \n",
427 | "
\n",
428 | " \n",
429 | " 16 | \n",
430 | " [chicken, nan, light cream] | \n",
431 | " 4.84395 | \n",
432 | "
\n",
433 | " \n",
434 | " 0 | \n",
435 | " [chicken, light cream] | \n",
436 | " 4.84395 | \n",
437 | "
\n",
438 | " \n",
439 | " 2 | \n",
440 | " [pasta, escalope] | \n",
441 | " 4.70081 | \n",
442 | "
\n",
443 | " \n",
444 | " 26 | \n",
445 | " [pasta, escalope, nan] | \n",
446 | " 4.70081 | \n",
447 | "
\n",
448 | " \n",
449 | "
\n",
450 | "
"
451 | ],
452 | "text/plain": [
453 | " Association Lift\n",
454 | "128 [olive oil, whole wheat pasta, mineral water, ... 6.11586\n",
455 | "58 [olive oil, whole wheat pasta, mineral water] 6.11586\n",
456 | "96 [soup, mineral water, frozen vegetables, milk] 5.48441\n",
457 | "146 [soup, mineral water, frozen vegetables, nan, ... 5.48441\n",
458 | "28 [honey, fromage blanc, nan] 5.16427\n",
459 | "3 [honey, fromage blanc] 5.16427\n",
460 | "16 [chicken, nan, light cream] 4.84395\n",
461 | "0 [chicken, light cream] 4.84395\n",
462 | "2 [pasta, escalope] 4.70081\n",
463 | "26 [pasta, escalope, nan] 4.70081"
464 | ]
465 | },
466 | "execution_count": 9,
467 | "metadata": {},
468 | "output_type": "execute_result"
469 | }
470 | ],
471 | "source": [
472 | "# Show top 10 higher lift scores\n",
473 | "rank.sort_values('Lift', ascending=False).head(10)"
474 | ]
475 | },
476 | {
477 | "cell_type": "markdown",
478 | "metadata": {},
479 | "source": [
480 | "By the study, \"olive oil, whole wheat pasta, mineral water\" are the most commom combined items from this week for the supermarket in question. "
481 | ]
482 | },
483 | {
484 | "cell_type": "markdown",
485 | "metadata": {},
486 | "source": [
487 | "## ECLAT Implementation\n",
488 | "\n",
489 | "This is an implementation of the ECLAT code by hand. It calculate the pairs that have been bought more frequently comparing to other pairs. At the end, we expect to see what is the most common combination of products during the week. \n",
490 | "\n",
491 | "An extension of the code can calculate the three most common combination, 4, and so on."
492 | ]
493 | },
494 | {
495 | "cell_type": "markdown",
496 | "metadata": {},
497 | "source": [
498 | "#### Getting the list of products bought this week by all customers"
499 | ]
500 | },
501 | {
502 | "cell_type": "code",
503 | "execution_count": 10,
504 | "metadata": {
505 | "collapsed": false
506 | },
507 | "outputs": [],
508 | "source": [
509 | "# Putting all transactions in a single list\n",
510 | "itens = []\n",
511 | "for i in range(0, len(transactions)):\n",
512 | " itens.extend(transactions[i])\n",
513 | "\n",
514 | "# Finding unique items from transactions and removing nan\n",
515 | "uniqueItems = list(set(itens))\n",
516 | "uniqueItems.remove('nan')"
517 | ]
518 | },
519 | {
520 | "cell_type": "code",
521 | "execution_count": 11,
522 | "metadata": {
523 | "collapsed": false
524 | },
525 | "outputs": [],
526 | "source": [
527 | "# test code\n",
528 | "#tra = [s for s in transactions if (\"mineral water\") in s and (\"ground beef\") in s and (\"shrimp\") in s]"
529 | ]
530 | },
531 | {
532 | "cell_type": "markdown",
533 | "metadata": {},
534 | "source": [
535 | "#### Creating combinations with the items - pairs"
536 | ]
537 | },
538 | {
539 | "cell_type": "code",
540 | "execution_count": 12,
541 | "metadata": {
542 | "collapsed": false
543 | },
544 | "outputs": [],
545 | "source": [
546 | "pair = []\n",
547 | "for j in range(0, len(uniqueItems)):\n",
548 | " k = 1;\n",
549 | " while k <= len(uniqueItems):\n",
550 | " try:\n",
551 | " pair.append([uniqueItems[j], uniqueItems[j+k]])\n",
552 | " except IndexError:\n",
553 | " pass\n",
554 | " k = k + 1; "
555 | ]
556 | },
557 | {
558 | "cell_type": "markdown",
559 | "metadata": {},
560 | "source": [
561 | "#### Calculating score\n",
562 | "The calculation is done looking at the number of customers that bought both items (the pair) and divided by all customers of the week (7501). This calculation is done for all pairs possible and the score is returned on \"score\" list.\n",
563 | "\n",
564 | " . \n",
565 | " *** score = (# lists that contain [item x and item y]) / (# all lists) ***"
566 | ]
567 | },
568 | {
569 | "cell_type": "code",
570 | "execution_count": 13,
571 | "metadata": {
572 | "collapsed": false
573 | },
574 | "outputs": [],
575 | "source": [
576 | "score = []\n",
577 | "for i in pair:\n",
578 | " cond = []\n",
579 | " for item in i:\n",
580 | " cond.append('(\"%s\") in s' %item)\n",
581 | " mycode = ('[s for s in transactions if ' + ' and '.join(cond) + ']')\n",
582 | " #mycode = \"print 'hello world'\"\n",
583 | " score.append(len(eval(mycode))/7501.)"
584 | ]
585 | },
586 | {
587 | "cell_type": "markdown",
588 | "metadata": {},
589 | "source": [
590 | "#### Showing results\n",
591 | "\n",
592 | "Top 10 Most common pairs of items of this week"
593 | ]
594 | },
595 | {
596 | "cell_type": "code",
597 | "execution_count": 14,
598 | "metadata": {
599 | "collapsed": false
600 | },
601 | "outputs": [],
602 | "source": [
603 | "ranking_ECLAT = pd.DataFrame([pair, score]).T\n",
604 | "ranking_ECLAT.columns = ['Pair', 'Score']"
605 | ]
606 | },
607 | {
608 | "cell_type": "code",
609 | "execution_count": 15,
610 | "metadata": {
611 | "collapsed": false
612 | },
613 | "outputs": [
614 | {
615 | "data": {
616 | "text/html": [
617 | "\n",
618 | "
\n",
619 | " \n",
620 | " \n",
621 | " | \n",
622 | " Pair | \n",
623 | " Score | \n",
624 | "
\n",
625 | " \n",
626 | " \n",
627 | " \n",
628 | " 3809 | \n",
629 | " [spaghetti, mineral water] | \n",
630 | " 0.0597254 | \n",
631 | "
\n",
632 | " \n",
633 | " 6389 | \n",
634 | " [chocolate, mineral water] | \n",
635 | " 0.0526596 | \n",
636 | "
\n",
637 | " \n",
638 | " 7096 | \n",
639 | " [mineral water, eggs] | \n",
640 | " 0.0509265 | \n",
641 | "
\n",
642 | " \n",
643 | " 689 | \n",
644 | " [milk, mineral water] | \n",
645 | " 0.0479936 | \n",
646 | "
\n",
647 | " \n",
648 | " 6002 | \n",
649 | " [ground beef, mineral water] | \n",
650 | " 0.0409279 | \n",
651 | "
\n",
652 | " \n",
653 | " 3779 | \n",
654 | " [spaghetti, chocolate] | \n",
655 | " 0.0391948 | \n",
656 | "
\n",
657 | " \n",
658 | " 3770 | \n",
659 | " [spaghetti, ground beef] | \n",
660 | " 0.0391948 | \n",
661 | "
\n",
662 | " \n",
663 | " 3811 | \n",
664 | " [spaghetti, eggs] | \n",
665 | " 0.0365285 | \n",
666 | "
\n",
667 | " \n",
668 | " 6604 | \n",
669 | " [french fries, eggs] | \n",
670 | " 0.0363951 | \n",
671 | "
\n",
672 | " \n",
673 | " 1877 | \n",
674 | " [frozen vegetables, mineral water] | \n",
675 | " 0.0357286 | \n",
676 | "
\n",
677 | " \n",
678 | "
\n",
679 | "
"
680 | ],
681 | "text/plain": [
682 | " Pair Score\n",
683 | "3809 [spaghetti, mineral water] 0.0597254\n",
684 | "6389 [chocolate, mineral water] 0.0526596\n",
685 | "7096 [mineral water, eggs] 0.0509265\n",
686 | "689 [milk, mineral water] 0.0479936\n",
687 | "6002 [ground beef, mineral water] 0.0409279\n",
688 | "3779 [spaghetti, chocolate] 0.0391948\n",
689 | "3770 [spaghetti, ground beef] 0.0391948\n",
690 | "3811 [spaghetti, eggs] 0.0365285\n",
691 | "6604 [french fries, eggs] 0.0363951\n",
692 | "1877 [frozen vegetables, mineral water] 0.0357286"
693 | ]
694 | },
695 | "execution_count": 15,
696 | "metadata": {},
697 | "output_type": "execute_result"
698 | }
699 | ],
700 | "source": [
701 | "ranking_ECLAT.sort_values('Score', ascending=False).head(10)"
702 | ]
703 | },
704 | {
705 | "cell_type": "markdown",
706 | "metadata": {},
707 | "source": [
708 | "### What if we do that for trios?"
709 | ]
710 | },
711 | {
712 | "cell_type": "code",
713 | "execution_count": 27,
714 | "metadata": {
715 | "collapsed": true
716 | },
717 | "outputs": [],
718 | "source": [
719 | "# Creating trios\n",
720 | "trio = []\n",
721 | "for j in range(0, len(uniqueItems)):\n",
722 | " for k in range(j, len(uniqueItems)):\n",
723 | " for l in range(k, len(uniqueItems)):\n",
724 | " if (k != j) and (j != l) and (k != l):\n",
725 | " try:\n",
726 | " trio.append([uniqueItems[j], uniqueItems[j+k], uniqueItems[j+l]])\n",
727 | " except IndexError:\n",
728 | " pass "
729 | ]
730 | },
731 | {
732 | "cell_type": "code",
733 | "execution_count": 29,
734 | "metadata": {
735 | "collapsed": false,
736 | "scrolled": false
737 | },
738 | "outputs": [
739 | {
740 | "data": {
741 | "text/plain": [
742 | "[['pet food', 'green tea', 'whole wheat rice'],\n",
743 | " ['pet food', 'green tea', 'antioxydant juice'],\n",
744 | " ['pet food', 'green tea', 'chicken'],\n",
745 | " ['pet food', 'green tea', 'milk'],\n",
746 | " ['pet food', 'green tea', 'mint green tea']]"
747 | ]
748 | },
749 | "execution_count": 29,
750 | "metadata": {},
751 | "output_type": "execute_result"
752 | }
753 | ],
754 | "source": [
755 | "trio[:5]"
756 | ]
757 | },
758 | {
759 | "cell_type": "code",
760 | "execution_count": 30,
761 | "metadata": {
762 | "collapsed": true
763 | },
764 | "outputs": [],
765 | "source": [
766 | "score_trio = []\n",
767 | "for i in trio:\n",
768 | " cond = []\n",
769 | " for item in i:\n",
770 | " cond.append('(\"%s\") in s' %item)\n",
771 | " mycode = ('[s for s in transactions if ' + ' and '.join(cond) + ']')\n",
772 | " #mycode = \"print 'hello world'\"\n",
773 | " score_trio.append(len(eval(mycode))/7501.)"
774 | ]
775 | },
776 | {
777 | "cell_type": "code",
778 | "execution_count": 31,
779 | "metadata": {
780 | "collapsed": false
781 | },
782 | "outputs": [
783 | {
784 | "data": {
785 | "text/html": [
786 | "\n",
787 | "
\n",
788 | " \n",
789 | " \n",
790 | " | \n",
791 | " Trio | \n",
792 | " Score | \n",
793 | "
\n",
794 | " \n",
795 | " \n",
796 | " \n",
797 | " 134586 | \n",
798 | " [spaghetti, chocolate, mineral water] | \n",
799 | " 0.0158646 | \n",
800 | "
\n",
801 | " \n",
802 | " 35350 | \n",
803 | " [milk, spaghetti, mineral water] | \n",
804 | " 0.0157312 | \n",
805 | "
\n",
806 | " \n",
807 | " 135293 | \n",
808 | " [spaghetti, mineral water, eggs] | \n",
809 | " 0.0142648 | \n",
810 | "
\n",
811 | " \n",
812 | " 37930 | \n",
813 | " [milk, chocolate, mineral water] | \n",
814 | " 0.0139981 | \n",
815 | "
\n",
816 | " \n",
817 | " 38637 | \n",
818 | " [milk, mineral water, eggs] | \n",
819 | " 0.0130649 | \n",
820 | "
\n",
821 | " \n",
822 | " 86786 | \n",
823 | " [frozen vegetables, spaghetti, mineral water] | \n",
824 | " 0.0119984 | \n",
825 | "
\n",
826 | " \n",
827 | " 37543 | \n",
828 | " [milk, ground beef, mineral water] | \n",
829 | " 0.0110652 | \n",
830 | "
\n",
831 | " \n",
832 | " 33418 | \n",
833 | " [milk, frozen vegetables, mineral water] | \n",
834 | " 0.0110652 | \n",
835 | "
\n",
836 | " \n",
837 | " 35320 | \n",
838 | " [milk, spaghetti, chocolate] | \n",
839 | " 0.0109319 | \n",
840 | "
\n",
841 | " \n",
842 | " 134588 | \n",
843 | " [spaghetti, chocolate, eggs] | \n",
844 | " 0.0105319 | \n",
845 | "
\n",
846 | " \n",
847 | "
\n",
848 | "
"
849 | ],
850 | "text/plain": [
851 | " Trio Score\n",
852 | "134586 [spaghetti, chocolate, mineral water] 0.0158646\n",
853 | "35350 [milk, spaghetti, mineral water] 0.0157312\n",
854 | "135293 [spaghetti, mineral water, eggs] 0.0142648\n",
855 | "37930 [milk, chocolate, mineral water] 0.0139981\n",
856 | "38637 [milk, mineral water, eggs] 0.0130649\n",
857 | "86786 [frozen vegetables, spaghetti, mineral water] 0.0119984\n",
858 | "37543 [milk, ground beef, mineral water] 0.0110652\n",
859 | "33418 [milk, frozen vegetables, mineral water] 0.0110652\n",
860 | "35320 [milk, spaghetti, chocolate] 0.0109319\n",
861 | "134588 [spaghetti, chocolate, eggs] 0.0105319"
862 | ]
863 | },
864 | "execution_count": 31,
865 | "metadata": {},
866 | "output_type": "execute_result"
867 | }
868 | ],
869 | "source": [
870 | "ranking_ECLAT_trio = pd.DataFrame([trio, score_trio]).T\n",
871 | "ranking_ECLAT_trio.columns = ['Trio', 'Score']\n",
872 | "ranking_ECLAT_trio.sort_values('Score', ascending=False).head(10)"
873 | ]
874 | },
875 | {
876 | "cell_type": "markdown",
877 | "metadata": {},
878 | "source": [
879 | "## What about comparing the results from Apriori and ECLAT?"
880 | ]
881 | },
882 | {
883 | "cell_type": "markdown",
884 | "metadata": {},
885 | "source": [
886 | "We got from Apriori that the combination that lead to more \"attractiveness power\" is \"olive oil\", \"whole wheat pasta\" and \"mineral water\". If we run the ECLAT code for this set of items, we will obtain: 0.0039.\n",
887 | "\n",
888 | "This score of 3 items has not enough score to be placed among top 10, but they are measuring different metrics. According to apriori these are the items that when picked one lead to another items more frequently than other combinations, i.e. when a person pick 'olive oil', the probability of picking 'whole wheat pasta' and 'mineral water' is much higher than picking another combination. ECLAT in another hand is just sorting as the most common combinations of all lists, not caring about how one item isolatedly can influence in the purchase of another."
889 | ]
890 | },
891 | {
892 | "cell_type": "code",
893 | "execution_count": 33,
894 | "metadata": {
895 | "collapsed": false
896 | },
897 | "outputs": [
898 | {
899 | "name": "stdout",
900 | "output_type": "stream",
901 | "text": [
902 | "\n"
903 | ]
904 | }
905 | ],
906 | "source": [
907 | "i = [\"olive oil\", \"whole wheat pasta\", \"mineral water\"]\n",
908 | "cond = []\n",
909 | "for item in i:\n",
910 | " cond.append('(\"%s\") in s' %item)\n",
911 | "mycode = ('[s for s in transactions if ' + ' and '.join(cond) + ']')\n",
912 | "#mycode = \"print 'hello world'\"\n",
913 | "tra = eval(mycode)"
914 | ]
915 | },
916 | {
917 | "cell_type": "code",
918 | "execution_count": 34,
919 | "metadata": {
920 | "collapsed": false
921 | },
922 | "outputs": [
923 | {
924 | "name": "stdout",
925 | "output_type": "stream",
926 | "text": [
927 | "Score for \"olive oil\", \"whole wheat pasta\", \"mineral water\": 0.00386615117984\n"
928 | ]
929 | }
930 | ],
931 | "source": [
932 | "print 'Score for \"olive oil\", \"whole wheat pasta\", \"mineral water\":', len(tra)/7501."
933 | ]
934 | },
935 | {
936 | "cell_type": "code",
937 | "execution_count": null,
938 | "metadata": {
939 | "collapsed": true
940 | },
941 | "outputs": [],
942 | "source": []
943 | }
944 | ],
945 | "metadata": {
946 | "anaconda-cloud": {},
947 | "kernelspec": {
948 | "display_name": "Python 2",
949 | "language": "python",
950 | "name": "python2"
951 | },
952 | "language_info": {
953 | "codemirror_mode": {
954 | "name": "ipython",
955 | "version": 2
956 | },
957 | "file_extension": ".py",
958 | "mimetype": "text/x-python",
959 | "name": "python",
960 | "nbconvert_exporter": "python",
961 | "pygments_lexer": "ipython2",
962 | "version": "2.7.12"
963 | }
964 | },
965 | "nbformat": 4,
966 | "nbformat_minor": 1
967 | }
968 |
--------------------------------------------------------------------------------
/ECLAT Pair.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amyoshino/Recommendation-System-with-Apriori-and-ECLAT/64b1358c26fdd9461bf7344ea095c873d1e9381a/ECLAT Pair.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Apriori and ECLAT application to build a recommendation system (product allocation strategy)
2 |
3 | Trained association rule models (Apriori and ECLAT) to find the items most frequently bought together by customers of a French supermarket during one week. Each of the 7,501 lines in the dataset represents the items that a unique customer bought during the week.
4 |
5 | The dataset and code are based on a lecture from the course Machine Learning A-Z™ by Kirill Eremenko (https://www.udemy.com/machinelearning/learn/v4/overview). I modified the code to do some transformations and fit the data into dataframes to make the visualization easier, and also developed the ECLAT algorithm from scratch.
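6 |
7 | The transformation from the raw CSV into the transaction lists consumed by both algorithms is, in essence (a sketch of what the notebook does):
8 |
9 | ```python
10 | import pandas as pd
11 |
12 | # Each of the 7,501 rows holds up to 20 items; empty cells become the string 'nan'
13 | dataset = pd.read_csv('Market_Basket_Optimisation.csv', header=None)
14 | transactions = [[str(dataset.values[i, j]) for j in range(20)] for i in range(7501)]
15 | ```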
6 |
7 | ### Apriori:
8 | This algorithm associates the product preferences of most customers and can be used to generate product recommendations and to inform product-placement strategy.
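9 |
10 | A minimal sketch of the training call (parameter values as used in `Apriori_and_ECLAT.ipynb`; `apriori` comes from the bundled `apyori.py` and `transactions` is the list of item lists built above):
11 |
12 | ```python
13 | from apyori import apriori
14 |
15 | # rules is a generator of RelationRecord(items, support, ordered_statistics)
16 | rules = apriori(transactions, min_support=0.003, min_confidence=0.2, min_lift=3)
17 | results = list(rules)
18 | ```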
9 |
10 | ### ECLAT:
11 | In this project I implemented the ECLAT algorithm by hand. It finds the pairs of items that have been bought together more frequently than other pairs. At the end, we expect to see the most common combination of products during the week. The sketch below outlines the idea.
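12 |
13 | A minimal sketch of the pair scoring, assuming `transactions` as above (the score of a pair is simply its support):
14 |
15 | ```python
16 | from itertools import combinations
17 |
18 | # Deduplicated item names, with the 'nan' placeholder removed
19 | uniqueItems = sorted(set(item for t in transactions for item in t) - {'nan'})
20 |
21 | # Score of a pair = fraction of transactions containing both of its items
22 | scores = {
23 |     (a, b): sum(1 for t in transactions if a in t and b in t) / float(len(transactions))
24 |     for a, b in combinations(uniqueItems, 2)
25 | }
26 | ```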
12 |
13 | ### Results:
14 |
15 | #### Apriori:
16 | The table shows the "lift" of every combination of products for the Apriori algorithm. From the table we can see that the combination with the most "attractiveness power" is "olive oil", "whole wheat pasta" and "mineral water", meaning that if one of these products is picked, the likelihood of picking the others is higher. The market can use this to position the products closer together (or farther apart), depending on the sales strategy. For reference, lift is computed in `apyori.py` as:
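17 |
18 | ```
19 | lift(A -> B) = confidence(A -> B) / support(B)
20 |              = support(A and B) / (support(A) * support(B))
21 | ```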
17 |
18 | ![Apriori results](Apriori.png)
19 |
20 | #### ECLAT:
21 | ECLAT ranks the most common combinations across all transaction lists, without considering how one item in isolation can influence the purchase of another. The scores shown in the table below highlight the most frequently picked product pairs.
22 |
23 | ![ECLAT pair results](ECLAT%20Pair.png)
24 |
25 |
26 |
--------------------------------------------------------------------------------
/apyori.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | """
4 | A simple implementation of the Apriori algorithm in Python.
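5 |
6 | Example (a minimal sketch; apriori() yields RelationRecord tuples):
7 |
8 |     from apyori import apriori
9 |     results = list(apriori([['A', 'B'], ['B', 'C']], min_support=0.5))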
5 | """
6 |
7 | import sys
8 | import csv
9 | import argparse
10 | import json
11 | import os
12 | from collections import namedtuple
13 | from itertools import combinations
14 | from itertools import chain
15 |
16 |
17 | # Meta information.
18 | __version__ = '1.1.1'
19 | __author__ = 'Yu Mochizuki'
20 | __author_email__ = 'ymoch.dev@gmail.com'
21 |
22 |
23 | ################################################################################
24 | # Data structures.
25 | ################################################################################
26 | class TransactionManager(object):
27 | """
28 | Transaction managers.
29 | """
30 |
31 | def __init__(self, transactions):
32 | """
33 | Initialize.
34 |
35 | Arguments:
36 | transactions -- A transaction iterable object
37 | (eg. [['A', 'B'], ['B', 'C']]).
38 | """
39 | self.__num_transaction = 0
40 | self.__items = []
41 | self.__transaction_index_map = {}
42 |
43 | for transaction in transactions:
44 | self.add_transaction(transaction)
45 |
46 | def add_transaction(self, transaction):
47 | """
48 | Add a transaction.
49 |
50 | Arguments:
51 | transaction -- A transaction as an iterable object (eg. ['A', 'B']).
52 | """
53 | for item in transaction:
54 | if item not in self.__transaction_index_map:
55 | self.__items.append(item)
56 | self.__transaction_index_map[item] = set()
57 | self.__transaction_index_map[item].add(self.__num_transaction)
58 | self.__num_transaction += 1
59 |
60 | def calc_support(self, items):
61 | """
62 | Returns a support for items.
63 |
64 | Arguments:
65 | items -- Items as an iterable object (eg. ['A', 'B']).
66 | """
67 | # Empty items is supported by all transactions.
68 | if not items:
69 | return 1.0
70 |
71 | # An empty transaction set supports no items.
72 | if not self.num_transaction:
73 | return 0.0
74 |
75 | # Create the transaction index intersection.
76 | sum_indexes = None
77 | for item in items:
78 | indexes = self.__transaction_index_map.get(item)
79 | if indexes is None:
80 | # No support for any set that contains a not existing item.
81 | return 0.0
82 |
83 | if sum_indexes is None:
84 | # Assign the indexes on the first time.
85 | sum_indexes = indexes
86 | else:
87 | # Otherwise, intersect with the previously accumulated indexes.
88 | sum_indexes = sum_indexes.intersection(indexes)
89 |
90 | # Calculate and return the support.
91 | return float(len(sum_indexes)) / self.__num_transaction
92 |
93 | def initial_candidates(self):
94 | """
95 | Returns the initial candidates.
96 | """
97 | return [frozenset([item]) for item in self.items]
98 |
99 | @property
100 | def num_transaction(self):
101 | """
102 | Returns the number of transactions.
103 | """
104 | return self.__num_transaction
105 |
106 | @property
107 | def items(self):
108 | """
109 | Returns the list of items that the transactions consist of.
110 | """
111 | return sorted(self.__items)
112 |
113 | @staticmethod
114 | def create(transactions):
115 | """
116 | Create the TransactionManager with a transaction instance.
117 | If the given instance is a TransactionManager, this returns itself.
118 | """
119 | if isinstance(transactions, TransactionManager):
120 | return transactions
121 | return TransactionManager(transactions)
122 |
123 |
124 | # Ignore name errors because these names are namedtuples.
125 | SupportRecord = namedtuple( # pylint: disable=C0103
126 | 'SupportRecord', ('items', 'support'))
127 | RelationRecord = namedtuple( # pylint: disable=C0103
128 | 'RelationRecord', SupportRecord._fields + ('ordered_statistics',))
129 | OrderedStatistic = namedtuple( # pylint: disable=C0103
130 | 'OrderedStatistic', ('items_base', 'items_add', 'confidence', 'lift',))
131 |
132 |
133 | ################################################################################
134 | # Inner functions.
135 | ################################################################################
136 | def create_next_candidates(prev_candidates, length):
137 | """
138 | Returns the apriori candidates as a list.
139 |
140 | Arguments:
141 | prev_candidates -- Previous candidates as a list.
142 | length -- The lengths of the next candidates.
143 | """
144 | # Solve the items.
145 | item_set = set()
146 | for candidate in prev_candidates:
147 | for item in candidate:
148 | item_set.add(item)
149 | items = sorted(item_set)
150 |
151 | # Create the temporary candidates. These will be filtered below.
152 | tmp_next_candidates = (frozenset(x) for x in combinations(items, length))
153 |
154 | # Return all the candidates if the length of the next candidates is 2
155 | # because their subsets are the same as items.
156 | if length < 3:
157 | return list(tmp_next_candidates)
158 |
159 | # Keep only the candidates all of whose subsets are
160 | # in the previous candidates.
161 | next_candidates = [
162 | candidate for candidate in tmp_next_candidates
163 | if all(
164 | frozenset(x) in prev_candidates
165 | for x in combinations(candidate, length - 1))
166 | ]
167 | return next_candidates
168 |
169 |
170 | def gen_support_records(transaction_manager, min_support, **kwargs):
171 | """
172 | Returns a generator of support records with given transactions.
173 |
174 | Arguments:
175 | transaction_manager -- Transactions as a TransactionManager instance.
176 | min_support -- A minimum support (float).
177 |
178 | Keyword arguments:
179 | max_length -- The maximum length of relations (integer).
180 | """
181 | # Parse arguments.
182 | max_length = kwargs.get('max_length')
183 |
184 | # For testing.
185 | _create_next_candidates = kwargs.get(
186 | '_create_next_candidates', create_next_candidates)
187 |
188 | # Process.
189 | candidates = transaction_manager.initial_candidates()
190 | length = 1
191 | while candidates:
192 | relations = set()
193 | for relation_candidate in candidates:
194 | support = transaction_manager.calc_support(relation_candidate)
195 | if support < min_support:
196 | continue
197 | candidate_set = frozenset(relation_candidate)
198 | relations.add(candidate_set)
199 | yield SupportRecord(candidate_set, support)
200 | length += 1
201 | if max_length and length > max_length:
202 | break
203 | candidates = _create_next_candidates(relations, length)
204 |
205 |
206 | def gen_ordered_statistics(transaction_manager, record):
207 | """
208 | Returns a generator of ordered statistics as OrderedStatistic instances.
209 |
210 | Arguments:
211 | transaction_manager -- Transactions as a TransactionManager instance.
212 | record -- A support record as a SupportRecord instance.
213 | """
214 | items = record.items
215 | for combination_set in combinations(sorted(items), len(items) - 1):
216 | items_base = frozenset(combination_set)
217 | items_add = frozenset(items.difference(items_base))
218 | confidence = (
219 | record.support / transaction_manager.calc_support(items_base))
220 | lift = confidence / transaction_manager.calc_support(items_add)
221 | yield OrderedStatistic(
222 | frozenset(items_base), frozenset(items_add), confidence, lift)
223 |
224 |
225 | def filter_ordered_statistics(ordered_statistics, **kwargs):
226 | """
227 | Filter OrderedStatistic objects.
228 |
229 | Arguments:
230 | ordered_statistics -- A OrderedStatistic iterable object.
231 |
232 | Keyword arguments:
233 | min_confidence -- The minimum confidence of relations (float).
234 | min_lift -- The minimum lift of relations (float).
235 | """
236 | min_confidence = kwargs.get('min_confidence', 0.0)
237 | min_lift = kwargs.get('min_lift', 0.0)
238 |
239 | for ordered_statistic in ordered_statistics:
240 | if ordered_statistic.confidence < min_confidence:
241 | continue
242 | if ordered_statistic.lift < min_lift:
243 | continue
244 | yield ordered_statistic
245 |
246 |
247 | ################################################################################
248 | # API function.
249 | ################################################################################
250 | def apriori(transactions, **kwargs):
251 | """
252 | Executes the Apriori algorithm and returns a RelationRecord generator.
253 |
254 | Arguments:
255 | transactions -- A transaction iterable object
256 | (eg. [['A', 'B'], ['B', 'C']]).
257 |
258 | Keyword arguments:
259 | min_support -- The minimum support of relations (float).
260 | min_confidence -- The minimum confidence of relations (float).
261 | min_lift -- The minimum lift of relations (float).
262 | max_length -- The maximum length of the relation (integer).
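263 |
264 |     Example (a minimal sketch):
265 |         records = list(apriori(
266 |             [['A', 'B'], ['B', 'C']], min_support=0.5, min_confidence=0.5))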
263 | """
264 | # Parse the arguments.
265 | min_support = kwargs.get('min_support', 0.1)
266 | min_confidence = kwargs.get('min_confidence', 0.0)
267 | min_lift = kwargs.get('min_lift', 0.0)
268 | max_length = kwargs.get('max_length', None)
269 |
270 | # Check arguments.
271 | if min_support <= 0:
272 | raise ValueError('minimum support must be > 0')
273 |
274 | # For testing.
275 | _gen_support_records = kwargs.get(
276 | '_gen_support_records', gen_support_records)
277 | _gen_ordered_statistics = kwargs.get(
278 | '_gen_ordered_statistics', gen_ordered_statistics)
279 | _filter_ordered_statistics = kwargs.get(
280 | '_filter_ordered_statistics', filter_ordered_statistics)
281 |
282 | # Calculate supports.
283 | transaction_manager = TransactionManager.create(transactions)
284 | support_records = _gen_support_records(
285 | transaction_manager, min_support, max_length=max_length)
286 |
287 | # Calculate ordered stats.
288 | for support_record in support_records:
289 | ordered_statistics = list(
290 | _filter_ordered_statistics(
291 | _gen_ordered_statistics(transaction_manager, support_record),
292 | min_confidence=min_confidence,
293 | min_lift=min_lift,
294 | )
295 | )
296 | if not ordered_statistics:
297 | continue
298 | yield RelationRecord(
299 | support_record.items, support_record.support, ordered_statistics)
300 |
301 |
302 | ################################################################################
303 | # Application functions.
304 | ################################################################################
305 | def parse_args(argv):
306 | """
307 | Parse commandline arguments.
308 |
309 | Arguments:
310 | argv -- An argument list without the program name.
311 | """
312 | output_funcs = {
313 | 'json': dump_as_json,
314 | 'tsv': dump_as_two_item_tsv,
315 | }
316 | default_output_func_key = 'json'
317 |
318 | parser = argparse.ArgumentParser()
319 | parser.add_argument(
320 | '-v', '--version', action='version',
321 | version='%(prog)s {0}'.format(__version__))
322 | parser.add_argument(
323 | 'input', metavar='inpath', nargs='*',
324 | help='Input transaction file (default: stdin).',
325 | type=argparse.FileType('r'), default=[sys.stdin])
326 | parser.add_argument(
327 | '-o', '--output', metavar='outpath',
328 | help='Output file (default: stdout).',
329 | type=argparse.FileType('w'), default=sys.stdout)
330 | parser.add_argument(
331 | '-l', '--max-length', metavar='int',
332 | help='Max length of relations (default: infinite).',
333 | type=int, default=None)
334 | parser.add_argument(
335 | '-s', '--min-support', metavar='float',
336 | help='Minimum support ratio (must be > 0, default: 0.1).',
337 | type=float, default=0.1)
338 | parser.add_argument(
339 | '-c', '--min-confidence', metavar='float',
340 | help='Minimum confidence (default: 0.5).',
341 | type=float, default=0.5)
342 | parser.add_argument(
343 | '-t', '--min-lift', metavar='float',
344 | help='Minimum lift (default: 0.0).',
345 | type=float, default=0.0)
346 | parser.add_argument(
347 | '-d', '--delimiter', metavar='str',
348 | help='Delimiter for items of transactions (default: tab).',
349 | type=str, default='\t')
350 | parser.add_argument(
351 | '-f', '--out-format', metavar='str',
352 | help='Output format ({0}; default: {1}).'.format(
353 | ', '.join(output_funcs.keys()), default_output_func_key),
354 | type=str, choices=output_funcs.keys(), default=default_output_func_key)
355 | args = parser.parse_args(argv)
356 |
357 | args.output_func = output_funcs[args.out_format]
358 | return args
359 |
360 |
361 | def load_transactions(input_file, **kwargs):
362 | """
363 | Load transactions and returns a generator for transactions.
364 |
365 | Arguments:
366 | input_file -- An input file.
367 |
368 | Keyword arguments:
369 | delimiter -- The delimiter of the transaction.
370 | """
371 | delimiter = kwargs.get('delimiter', '\t')
372 | for transaction in csv.reader(input_file, delimiter=delimiter):
373 | yield transaction if transaction else ['']
374 |
375 |
376 | def dump_as_json(record, output_file):
377 | """
378 | Dump a relation record as a JSON value.
379 |
380 | Arguments:
381 | record -- A RelationRecord instance to dump.
382 | output_file -- A file to output.
383 | """
384 | def default_func(value):
385 | """
386 | Default conversion for JSON value.
387 | """
388 | if isinstance(value, frozenset):
389 | return sorted(value)
390 | raise TypeError(repr(value) + " is not JSON serializable")
391 |
392 | converted_record = record._replace(
393 | ordered_statistics=[x._asdict() for x in record.ordered_statistics])
394 | json.dump(
395 | converted_record._asdict(), output_file,
396 | default=default_func, ensure_ascii=False)
397 | output_file.write(os.linesep)
398 |
399 |
400 | def dump_as_two_item_tsv(record, output_file):
401 | """
402 | Dump a relation record as TSV only for 2 item relations.
403 |
404 | Arguments:
405 | record -- A RelationRecord instance to dump.
406 | output_file -- A file to output.
407 | """
408 | for ordered_stats in record.ordered_statistics:
409 | if len(ordered_stats.items_base) != 1:
410 | continue
411 | if len(ordered_stats.items_add) != 1:
412 | continue
413 | output_file.write('{0}\t{1}\t{2:.8f}\t{3:.8f}\t{4:.8f}{5}'.format(
414 | list(ordered_stats.items_base)[0], list(ordered_stats.items_add)[0],
415 | record.support, ordered_stats.confidence, ordered_stats.lift,
416 | os.linesep))
417 |
418 |
419 | def main(**kwargs):
420 | """
421 | Executes the Apriori algorithm and prints its result.
422 | """
423 | # For tests.
424 | _parse_args = kwargs.get('_parse_args', parse_args)
425 | _load_transactions = kwargs.get('_load_transactions', load_transactions)
426 | _apriori = kwargs.get('_apriori', apriori)
427 |
428 | args = _parse_args(sys.argv[1:])
429 | transactions = _load_transactions(
430 | chain(*args.input), delimiter=args.delimiter)
431 | result = _apriori(
432 | transactions,
433 | max_length=args.max_length,
434 | min_support=args.min_support,
435 | min_confidence=args.min_confidence)
436 | for record in result:
437 | args.output_func(record, args.output)
438 |
439 |
440 | if __name__ == '__main__':
441 | main()
442 |
--------------------------------------------------------------------------------