├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── images │ ├── 8cats.png │ ├── affineDemo.mp4 │ ├── affineDemo.webm │ ├── background_resize.jpg │ ├── cat_original.png │ ├── cats image.jpg │ ├── compositeMatching.png │ ├── dog1_resize3.jpg │ ├── morty.mp4 │ └── morty.webm ├── index.html ├── jspolygon.js ├── main.js ├── math.js ├── moment.min.js └── style.css ├── fullEndToEndDemo ├── CMakeLists.txt ├── include │ └── hiredis │ │ ├── async.h │ │ ├── dict.h │ │ ├── fmacros.h │ │ ├── hiredis.h │ │ ├── net.h │ │ ├── read.h │ │ ├── sds.h │ │ ├── sdsalloc.h │ │ └── win32.h ├── inputImages │ ├── 8cats.png │ ├── cat1.png │ ├── cat2.png │ ├── cat3.png │ ├── cat4.png │ ├── cat5.png │ ├── cat6.png │ ├── cat7.png │ ├── cat8.png │ ├── cat_original.png │ ├── mona.jpg │ ├── monaComposite.jpg │ └── van_gogh.jpg ├── lib │ └── libhiredis.a ├── runDemo ├── runDemo1.sh ├── runDemo2.sh ├── setup.sh └── src │ ├── FragmentHash.h │ ├── Keypoint.h │ ├── PerceptualHash.h │ ├── PerceptualHash_Fast.h │ ├── ShapeAndPositionInvariantImage.h │ ├── Triangle.h │ ├── curvature.py │ ├── dumpKeypointsToJson.py │ ├── img_hash_opencv_module │ ├── PHash_Fast.cpp │ ├── PHash_Fast.h │ ├── img_hash.hpp │ ├── img_hash_base.hpp │ ├── phash.cpp │ ├── phash.hpp │ ├── precomp.hpp │ └── precomp.hpp~ │ ├── main.cc │ ├── mainImageProcessingFunctions.hpp │ └── utils.hpp ├── setup.py └── transformation_invariant_image_search ├── README.md ├── __init__.py ├── curvature.py ├── keypoints.py ├── main.py ├── phash.py └── requirements.txt /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Tom Murphy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the 
Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | recursive-include transformation_invariant_image_search/templates * 3 | recursive-include transformation_invariant_image_search/static * 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | For more information you can check out the discussion here on [Hacker News](https://news.ycombinator.com/item?id=14973741) 2 | 3 | # Transformation-Invariant Reverse Image Search 4 | 5 | This repo demos a reverse image search algorithm which performs 2D affine transformation-invariant partial image-matching in sublinear time with respect to the number of images in our database. 
6 | 7 | An online demo with a description of how the algorithm works is available here: 8 | [Demo](https://pippy360.github.io/transformationInvariantImageSearch) 9 | 10 | The /docs directory contains this front end javascript demo: https://pippy360.github.io/transformationInvariantImageSearch 11 | 12 | The /fullEndToEndDemo directory contains two full end to end c++ demos of the algorithm. 13 | 14 | The two end to end c++ demos use Redis as a database and do a direct hash lookup for the constant number of hashes produced for each query image. Each demo runs in O(1) time with respect to the number of images in the database. A nearest neighbor algorithm could also be used instead to find the closest hash within some threshold which would increase the accuracy but then the algorithm would run in amortized O(log n) time (depending on which NN algorithm was used). 15 | 16 | Processing each fragment/triangle of the image only requires the 3 points of the triangle and a read-only copy of the image so the preprocessing for an image is embarrassingly parallel. If implemented correctly there should be a near linear speedup with respect to the number of cores used. 17 | 18 | **However these demos were created quickly as a proof of concept and as a result are very slow. The demos show the alogrithm works and that it can work in O(1) time.** 19 | 20 | 21 | 22 | # Setup 23 | 24 | 25 | 26 | This setup was tested on a newly deployed vm on Debian GNU/Linux 9 (stretch), YMMV on different setups. 27 | 28 | Instead of running these commands manually you can run the ./setup.sh script while in the /fullEndToEndDemo directory. 29 | 30 | Or if you want to run the commands manually... 
31 | 32 | ``` 33 | # From the root of the repo go to ./fullEndToEndDemo 34 | cd ./fullEndToEndDemo 35 | 36 | # Grab all the dependencies, this install is pretty huge 37 | sudo apt-get update 38 | sudo apt-get install git cmake g++ redis-server libboost-all-dev libopencv-dev python-opencv python-numpy python-scipy -y 39 | 40 | #Make it 41 | cmake . 42 | make 43 | 44 | # This step is optional. It removes a pointless annoying error opencv spits out 45 | # About: https://stackoverflow.com/questions/12689304/ctypes-error-libdc1394-error-failed-to-initialize-libdc1394 46 | sudo ln /dev/null /dev/raw1394 47 | 48 | # Then run either ./runDemo1.sh or ./runDemo2.sh to run the demo 49 | 50 | 51 | ``` 52 | 53 | # Python setup 54 | 55 | All credit for the python code goes to [rachmadaniHaryono](https://github.com/rachmadaniHaryono) and [meowcoder](https://github.com/meowcoder). 56 | 57 | This setup was tested on a newly deployed vm on Ubuntu 18.04 LTS, YMMV on different setups. 58 | 59 | To use python package, do the following: 60 | 61 | ``` 62 | sudo apt-get update 63 | sudo apt-get install python3-pip python3-opencv redis-server -y 64 | 65 | # On some systems this path is missing 66 | # read more here: https://github.com/pypa/pip/issues/3813 67 | PATH="$PATH:~/.local/bin" 68 | 69 | #cd to project directory 70 | pip3 install . 71 | ``` 72 | 73 | You also need install redis. 74 | 75 | # Demo 1 76 | 77 | 78 | To run this demo go to the /fullEndToEndDemo directory and run ./runDemo1.sh 79 | 80 | This demo shows the original image below matching the 8 transformed images below. Each image has some combination of 2D affine transformations applied to it. The demo inserts each of the 8 images individually into the database and then queries the database with the original image. 
81 | 82 | 83 | ![Original Cat Image](https://pippy360.github.io/transformationInvariantImageSearch/images/cat_original.png) 84 | 85 | ![Transformed Cat Images](https://pippy360.github.io/transformationInvariantImageSearch/images/8cats.png) 86 | 87 | ## Output 88 | 89 | Here the 8 cats images are inserted first and then the database is queried with the orginal cat image. The original image matches all 8 images despite the transfomations. 90 | 91 | The low number of partial image matches is because we are doing direct hash lookups and so even a small bit of change (for example from antialising) can cause the perceptual hash to be ever so slightly off. Finding a closest hash using nearest neighbor would solve this issue. 92 | 93 | The demo takes 2 minutes (1 minute 38 seconds*) to run on a quad core VM but could run orders of magnitude faster with a better implementation. 94 | 95 | *Thanks to [meowcoder](https://github.com/meowcoder) for the speed up! 96 | 97 | ``` 98 | user@instance-1:~/transformationInvariantImageSearch/fullEndToEndDemo$ time ./runDemo1.sh 99 | Loading image: inputImages/cat1.png ... done 100 | Added 46725 image fragments to DB 101 | Loading image: inputImages/cat2.png ... done 102 | Added 65769 image fragments to DB 103 | Loading image: inputImages/cat3.png ... done 104 | Added 34179 image fragments to DB 105 | Loading image: inputImages/cat4.png ... done 106 | Added 44388 image fragments to DB 107 | Loading image: inputImages/cat5.png ... done 108 | Added 47799 image fragments to DB 109 | Loading image: inputImages/cat6.png ... done 110 | Added 44172 image fragments to DB 111 | Loading image: inputImages/cat7.png ... done 112 | Added 67131 image fragments to DB 113 | Loading image: inputImages/cat8.png ... done 114 | Added 18078 image fragments to DB 115 | Loading image: inputImages/cat_original.png ... done 116 | Added 30372 image fragments to DB 117 | Loading image: inputImages/cat_original.png ... 
done 118 | Matches: 119 | inputImages/cat1.png: 12 120 | inputImages/cat2.png: 16 121 | inputImages/cat3.png: 15 122 | inputImages/cat4.png: 1 123 | inputImages/cat5.png: 2 124 | inputImages/cat6.png: 4 125 | inputImages/cat7.png: 43 126 | inputImages/cat8.png: 18 127 | inputImages/cat_original.png: 30352 128 | Number of matches: 30463 129 | 130 | real 1m38.352s 131 | user 2m6.140s 132 | sys 0m6.592s 133 | ``` 134 | 135 | python example 136 | 137 | ```console 138 | $ time transformation-invariant-image-search insert fullEndToEndDemo/inputImages/cat* && \ 139 | time transformation-invariant-image-search lookup fullEndToEndDemo/inputImages/cat_original.png 140 | 141 | loading fullEndToEndDemo/inputImages/cat1.png 142 | 100%|██| 3/3 [00:07<00:00, 2.66s/it] 143 | 100%|██| 3/3 [00:08<00:00, 2.70s/it] 144 | 100%|█| 3/3 [00:00<00:00, 270.58it/s] 145 | 100%|| 1/1 [00:00<00:00, 2457.12it/s] 146 | added 58956 fragments for fullEndToEndDemo/inputImages/cat1.png 147 | loading fullEndToEndDemo/inputImages/cat2.png 148 | 100%|██| 3/3 [00:07<00:00, 2.64s/it] 149 | 100%|██| 3/3 [00:08<00:00, 2.76s/it] 150 | 100%|█| 3/3 [00:00<00:00, 149.91it/s] 151 | 100%|█| 1/1 [00:00<00:00, 902.00it/s] 152 | added 58486 fragments for fullEndToEndDemo/inputImages/cat2.png 153 | loading fullEndToEndDemo/inputImages/cat3.png 154 | 100%|█████████| 3/3 [00:04<00:00, 1.51s/it] 155 | 100%|█████████| 3/3 [00:04<00:00, 1.56s/it] 156 | 100%|█| 5025/5025 [00:01<00:00, 3570.22it/s] 157 | added 30141 fragments for fullEndToEndDemo/inputImages/cat3.png 158 | loading fullEndToEndDemo/inputImages/cat4.png 159 | 100%|███| 3/3 [00:07<00:00, 2.58s/it] 160 | 100%|███| 3/3 [00:07<00:00, 2.62s/it] 161 | 100%|██| 3/3 [00:00<00:00, 434.36it/s] 162 | 100%|█| 1/1 [00:00<00:00, 1709.87it/s] 163 | added 53013 fragments for fullEndToEndDemo/inputImages/cat4.png 164 | loading fullEndToEndDemo/inputImages/cat5.png 165 | 100%|█████████| 3/3 [00:08<00:00, 2.90s/it] 166 | 100%|█████████| 3/3 [00:09<00:00, 3.07s/it] 167 | 100%|█| 
9420/9420 [00:02<00:00, 3238.60it/s] 168 | added 56493 fragments for fullEndToEndDemo/inputImages/cat5.png 169 | loading fullEndToEndDemo/inputImages/cat6.png 170 | 100%|█████████| 3/3 [00:07<00:00, 2.41s/it] 171 | 100%|█████████| 3/3 [00:07<00:00, 2.50s/it] 172 | 100%|█| 7347/7347 [00:02<00:00, 2953.52it/s] 173 | added 44030 fragments for fullEndToEndDemo/inputImages/cat6.png 174 | loading fullEndToEndDemo/inputImages/cat7.png 175 | 100%|███████████| 3/3 [00:11<00:00, 3.82s/it] 176 | 100%|███████████| 3/3 [00:11<00:00, 3.94s/it] 177 | 100%|█| 10544/10544 [00:04<00:00, 2393.00it/s] 178 | added 63089 fragments for fullEndToEndDemo/inputImages/cat7.png 179 | loading fullEndToEndDemo/inputImages/cat8.png 180 | 100%|█████████| 3/3 [00:03<00:00, 1.06s/it] 181 | 100%|█████████| 3/3 [00:03<00:00, 1.07s/it] 182 | 100%|█| 3160/3160 [00:01<00:00, 3138.56it/s] 183 | added 18899 fragments for fullEndToEndDemo/inputImages/cat8.png 184 | loading fullEndToEndDemo/inputImages/cat_original.png 185 | 100%|█████████| 3/3 [00:05<00:00, 1.93s/it] 186 | 100%|█████████| 3/3 [00:05<00:00, 1.94s/it] 187 | 100%|█| 5795/5795 [00:01<00:00, 3211.96it/s] 188 | added 34764 fragments for fullEndToEndDemo/inputImages/cat_original.png 189 | transformation-invariant-image-search insert fullEndToEndDemo/inputImages/cat 141,98s user 10,14s system 159% cpu 1:35,54 total 190 | loading fullEndToEndDemo/inputImages/cat_original.png 191 | 100%|█████████| 3/3 [00:05<00:00, 1.83s/it] 192 | 100%|█████████| 3/3 [00:05<00:00, 1.94s/it] 193 | 100%|█| 5795/5795 [00:01<00:00, 3221.91it/s] 194 | matches for fullEndToEndDemo/inputImages/cat_original.png: 195 | 34770 fullEndToEndDemo/inputImages/cat_original.png 196 | 237 fullEndToEndDemo/inputImages/cat7.png 197 | 36 fullEndToEndDemo/inputImages/cat2.png 198 | 19 fullEndToEndDemo/inputImages/cat4.png 199 | 14 fullEndToEndDemo/inputImages/cat8.png 200 | 7 fullEndToEndDemo/inputImages/cat1.png 201 | 4 fullEndToEndDemo/inputImages/cat3.png 202 | 2 
fullEndToEndDemo/inputImages/cat5.png 203 | 1 fullEndToEndDemo/inputImages/cat6.png 204 | transformation-invariant-image-search lookup 12,71s user 1,62s system 151% cpu 9,472 total 205 | ``` 206 | 207 | # Demo 2 208 | 209 | 210 | To run this demo go to the /fullEndToEndDemo directory and run ./runDemo2.sh 211 | 212 | This demo shows partial image matching. The query image below (c) is a composite of images (a) and (b). The demo inserts images (a) and (b) into the database and then queries with image (c). Image (d) and (e) show the matching fragments, each coloured triangle is a fragment of the image that matched the composite image (c). 213 | 214 | ![Partial Image Match Example](https://pippy360.github.io/transformationInvariantImageSearch/images/compositeMatching.png) 215 | 216 | ## Output 217 | 218 | Here the two images mona.jpg and van_gogh.jpg are inserted into the database and then the database is queried with monaComposite.jpg. The demo takes 5 minutes 17 seconds (4 minutes 36 seconds*) to run on a quad core VM but could run orders of magnitude faster with a better implementation. 219 | 220 | *Thanks to [meowcoder](https://github.com/meowcoder) for the speed up! 221 | 222 | ``` 223 | user@instance-1:~/transformationInvariantImageSearch/fullEndToEndDemo$ time ./runDemo2.sh 224 | Loading image: ./inputImages/mona.jpg ... done 225 | Added 26991 image fragments to DB 226 | Loading image: ./inputImages/van_gogh.jpg ... done 227 | Added 1129896 image fragments to DB 228 | Loading image: ./inputImages/monaComposite.jpg ... 
done 229 | Matches: 230 | ./inputImages/mona.jpg: 5 231 | ./inputImages/van_gogh.jpg: 1478 232 | Number of matches: 1483 233 | 234 | real 4m36.635s 235 | user 6m50.988s 236 | sys 0m18.224s 237 | ``` 238 | 239 | python example 240 | 241 | ```console 242 | $ time transformation-invariant-image-search insert ./fullEndToEndDemo/inputImages/mona.jpg ./fullEndToEndDemo/inputImages/van_gogh.jpg && \ 243 | time transformation-invariant-image-search lookup ./fullEndToEndDemo/inputImages/monaComposite.jpg 244 | 245 | loading ./fullEndToEndDemo/inputImages/mona.jpg 246 | 100%|███| 3/3 [00:03<00:00, 1.24s/it] 247 | 100%|███| 3/3 [00:03<00:00, 1.20s/it] 248 | 100%|██| 3/3 [00:00<00:00, 302.48it/s] 249 | 100%|█| 1/1 [00:00<00:00, 2471.60it/s] 250 | added 24145 fragments for ./fullEndToEndDemo/inputImages/mona.jpg 251 | loading ./fullEndToEndDemo/inputImages/van_gogh.jpg 252 | 100%|█████████████| 3/3 [02:50<00:00, 56.01s/it] 253 | 100%|█████████████| 3/3 [02:50<00:00, 56.14s/it] 254 | 100%|█| 178267/178267 [00:56<00:00, 3170.20it/s] 255 | added 1058329 fragments for ./fullEndToEndDemo/inputImages/van_gogh.jpg 256 | transformation-invariant-image-search insert 384,51s user 12,84s system 168% cpu 3:56,42 total 257 | loading ./fullEndToEndDemo/inputImages/monaComposite.jpg 258 | 100%|███████████| 3/3 [01:01<00:00, 20.88s/it] 259 | 100%|███████████| 3/3 [01:01<00:00, 20.77s/it] 260 | 100%|█| 61563/61563 [00:19<00:00, 3129.92it/s] 261 | matches for ./fullEndToEndDemo/inputImages/monaComposite.jpg: 262 | 1332 ./fullEndToEndDemo/inputImages/van_gogh.jpg 263 | 11 ./fullEndToEndDemo/inputImages/mona.jpg 264 | transformation-invariant-image-search lookup 133,29s user 5,07s system 164% cpu 1:24,30 total 265 | ``` 266 | -------------------------------------------------------------------------------- /docs/images/8cats.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/8cats.png -------------------------------------------------------------------------------- /docs/images/affineDemo.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/affineDemo.mp4 -------------------------------------------------------------------------------- /docs/images/affineDemo.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/affineDemo.webm -------------------------------------------------------------------------------- /docs/images/background_resize.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/background_resize.jpg -------------------------------------------------------------------------------- /docs/images/cat_original.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/cat_original.png -------------------------------------------------------------------------------- /docs/images/cats image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/cats image.jpg -------------------------------------------------------------------------------- /docs/images/compositeMatching.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/compositeMatching.png -------------------------------------------------------------------------------- /docs/images/dog1_resize3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/dog1_resize3.jpg -------------------------------------------------------------------------------- /docs/images/morty.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/morty.mp4 -------------------------------------------------------------------------------- /docs/images/morty.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/docs/images/morty.webm -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 11 | 13 | 14 | 15 | 16 | 17 |
18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 |
26 |

Image Matching Demo

27 |

Click and drag on the query image to apply a transformation.

28 |
29 |
30 |
31 |
32 | 33 | 37 | 41 | 45 | 48 | 51 | 52 | 53 | 54 | 55 | 56 |
57 | 59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | 73 |
74 | 78 |
79 |
80 | 82 | 83 | 84 | 85 |
86 |
87 |
88 | 89 |
90 | 94 |
95 |
96 | 98 | 99 | 100 | 101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 | 109 |
110 |
111 | 112 | 113 | 114 | 115 |
116 |
117 |
118 | 119 |
120 |
121 | 125 |
126 |
127 | 128 | 129 | 130 | 131 |
132 |
133 |
134 | 135 | 136 | 137 | 138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 | 150 |
151 |
152 |
153 |
154 |
155 |

About

156 |

157 | This demo showcases a reverse image search algorithm which performs 2D affine transformation-invariant partial image-matching 158 | in sublinear time. The algorithm compares an input image to its database of preprocessed images and determines 159 | if the input matches any image in the database. The database need not contain the original image as inputs 160 | can be matched to any 2D affine transformation of the original. This means that images which have been 161 | scaled (uniformly or non-uniformly), skewed, translated, cropped or rotated (or have undergone any combination 162 | of these transformations) can be identified as coming from the same source image (Figure 1). 163 |

164 |

165 | The algorithm runs in sublinear time with respect to the number of images in the database regardless of the number of transformations applied. 166 | Note that if image-matching could not be done in sublinear time it would not 167 | function at the scale that the likes of Google or Microsoft require. 168 |

169 |
170 | 174 |
Figure 1. 2D affine transformation invariant image-matching
175 |
176 |

177 | If the input is a composite of images or image fragments, the algorithm will return matches for each 178 | image/image fragment (Figure 2). 179 |

180 |
181 | 182 |
Figure 2. The query image (c), which is a composite of (a) and (b), matches the 183 | two images (d) and (e) stored in the database. The code to reproduce this result can be found here. 184 |
185 |
186 |
187 |
188 |

How it Works

189 |
190 |
191 |
1.
192 |
193 | The algorithm finds keypoints in the input using edge detection1. 194 |
195 |
196 |
197 |
2.
198 |
199 | Each set of three keypoints is converted into a triangle2. 200 |
201 |
202 |
203 |
3.
204 |
205 | These triangles are transformed into equilateral triangles. 206 |
207 |
208 |
209 |
4.
210 |
211 | Each equilateral triangle is rotated to each of its 3 edges and a perceptual hashing algorithm (in this case PHash) is used to produce a hash for each side3. 212 |
213 |
214 |
215 |
5.
216 |
217 | The algorithm compares the hash to those stored in the database and returns all matching 218 | images4. 219 |
220 |
221 |
222 |
223 |

224 | All images in the database have been preprocessed in this manner to produce hashes for comparison. 225 |

226 |
227 |
228 | 232 |
Figure 3. Step-by-step video guide showing how the algorithm operates
233 |
234 |
235 |
236 |
237 | 1 238 |
239 |
240 | Any keypoint-finding algorithm can be used so long as it is 2D 241 | affine transformation-invariant. 242 |
243 |
244 |
245 |
246 | 2 247 |
248 |
249 | The comparison can be done through a hash lookup (which can be done in constant time with respect to the number of images in the database) 250 | or by finding a ‘nearest-neighbour’ in the database (which can be done 251 | in amortized O(log₂ n) time). 252 |
253 |
254 |
255 |
256 | 3 257 |
258 |
259 | Rotating the triangle to each of its 3 sides and hashing each rotation keeps the algorithm rotation invariant. 260 |
261 |
262 |
263 |
264 | 4 265 |
266 |
267 | The algorithm will return multiple matches if the input is a 268 | composite of images. 269 |
270 |
271 |
272 |
273 |
274 |

How it Compares to the Competition

275 |

276 | As you can see in Figure 4 below, the algorithm performs better than industry leaders in matching 277 | images which have undergone 2D affine transformations. Even the best-performing service, Google Image 278 | Search, fails to handle a simple 45 degree rotation. 279 |

280 |
281 | 282 |
283 | Figure 4. Comparison of the image-matching capabilities of our 284 | algorithm versus market leaders. The code to reproduce this result can be found here. 285 |
286 |
287 |

288 | Market leaders show limited ability to find matches of images which have undergone certain transformations. 289 | Our algorithm solves this problem for 2D affine transformations and, if used in 290 | conjunction with other modern techniques, offers a significant improvement in reverse-image searching. 291 |

292 |
293 |
294 |
295 |
296 | 297 | 298 | 301 | 304 | 305 | 306 | 307 | 338 | 348 | 349 | 357 | 360 | 361 | -------------------------------------------------------------------------------- /docs/jspolygon.js: -------------------------------------------------------------------------------- 1 | /* 2 | * This is the Point constructor. Polygon uses this object, but it is 3 | * just a really simple set of x and y coordinates. 4 | */ 5 | function Point(px, py) { 6 | this.x = px; 7 | this.y = py; 8 | } 9 | 10 | /* 11 | * This is the Polygon constructor. All points are center-relative. 12 | */ 13 | function Polygon(c, clr) { 14 | 15 | this.points = new Array(); 16 | this.center = c; 17 | this.color = clr; // used when drawing 18 | 19 | } 20 | 21 | /* 22 | * Point x and y values should be relative to the center. 23 | */ 24 | Polygon.prototype.addPoint = function(p) { 25 | this.points.push(p); 26 | } 27 | 28 | /* 29 | * Point x and y values should be absolute coordinates. 30 | */ 31 | Polygon.prototype.addAbsolutePoint = function(p) { 32 | this.points.push( { "x": p.x - this.center.x, "y": p.y - this.center.y } ); 33 | } 34 | 35 | /* 36 | * Returns the number of sides. Equal to the number of vertices. 37 | */ 38 | Polygon.prototype.getNumberOfSides = function() { 39 | return this.points.length; 40 | } 41 | 42 | /* 43 | * rotate the polygon by a number of radians 44 | */ 45 | Polygon.prototype.rotate = function(rads) { 46 | 47 | for (var i = 0; i < this.points.length; i++) { 48 | var x = this.points[i].x; 49 | var y = this.points[i].y; 50 | this.points[i].x = Math.cos(rads) * x - Math.sin(rads) * y; 51 | this.points[i].y = Math.sin(rads) * x + Math.cos(rads) * y; 52 | } 53 | 54 | } 55 | 56 | /* 57 | * The draw function takes as a parameter a Context object from 58 | * a Canvas element and draws the polygon on it. 
59 | */ 60 | Polygon.prototype.draw = function(ctx) { 61 | 62 | ctx.save(); 63 | 64 | ctx.fillStyle = this.color; 65 | ctx.beginPath(); 66 | ctx.moveTo(this.points[0].x + this.center.x, this.points[0].y + this.center.y); 67 | for (var i = 1; i < this.points.length; i++) { 68 | ctx.lineTo(this.points[i].x + this.center.x, this.points[i].y + this.center.y); 69 | } 70 | ctx.closePath(); 71 | ctx.fill(); 72 | 73 | ctx.restore(); 74 | 75 | } 76 | 77 | /* 78 | * This function returns true if the given point is inside the polygon, 79 | * and false otherwise. 80 | */ 81 | Polygon.prototype.containsPoint = function(pnt) { 82 | 83 | var nvert = this.points.length; 84 | var testx = pnt.x; 85 | var testy = pnt.y; 86 | 87 | var vertx = new Array(); 88 | for (var q = 0; q < this.points.length; q++) { 89 | vertx.push(this.points[q].x + this.center.x); 90 | } 91 | 92 | var verty = new Array(); 93 | for (var w = 0; w < this.points.length; w++) { 94 | verty.push(this.points[w].y + this.center.y); 95 | } 96 | 97 | var i, j = 0; 98 | var c = false; 99 | for (i = 0, j = nvert - 1; i < nvert; j = i++) { 100 | if ( ((verty[i]>testy) != (verty[j]>testy)) && 101 | (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) ) 102 | c = !c; 103 | } 104 | return c; 105 | 106 | } 107 | 108 | /* 109 | * To detect intersection with another Polygon object, this 110 | * function uses the Separating Axis Theorem. It returns false 111 | * if there is no intersection, or an object if there is. The object 112 | * contains 2 fields, overlap and axis. Moving the polygon by overlap 113 | * on axis will get the polygons out of intersection. 
114 | */ 115 | Polygon.prototype.intersectsWith = function(other) { 116 | 117 | var axis = new Point(); 118 | var tmp, minA, maxA, minB, maxB; 119 | var side, i; 120 | var smallest = null; 121 | var overlap = 99999999; 122 | 123 | /* test polygon A's sides */ 124 | for (side = 0; side < this.getNumberOfSides(); side++) 125 | { 126 | /* get the axis that we will project onto */ 127 | if (side == 0) 128 | { 129 | axis.x = this.points[this.getNumberOfSides() - 1].y - this.points[0].y; 130 | axis.y = this.points[0].x - this.points[this.getNumberOfSides() - 1].x; 131 | } 132 | else 133 | { 134 | axis.x = this.points[side - 1].y - this.points[side].y; 135 | axis.y = this.points[side].x - this.points[side - 1].x; 136 | } 137 | 138 | /* normalize the axis */ 139 | tmp = Math.sqrt(axis.x * axis.x + axis.y * axis.y); 140 | axis.x /= tmp; 141 | axis.y /= tmp; 142 | 143 | /* project polygon A onto axis to determine the min/max */ 144 | minA = maxA = this.points[0].x * axis.x + this.points[0].y * axis.y; 145 | for (i = 1; i < this.getNumberOfSides(); i++) 146 | { 147 | tmp = this.points[i].x * axis.x + this.points[i].y * axis.y; 148 | if (tmp > maxA) 149 | maxA = tmp; 150 | else if (tmp < minA) 151 | minA = tmp; 152 | } 153 | /* correct for offset */ 154 | tmp = this.center.x * axis.x + this.center.y * axis.y; 155 | minA += tmp; 156 | maxA += tmp; 157 | 158 | /* project polygon B onto axis to determine the min/max */ 159 | minB = maxB = other.points[0].x * axis.x + other.points[0].y * axis.y; 160 | for (i = 1; i < other.getNumberOfSides(); i++) 161 | { 162 | tmp = other.points[i].x * axis.x + other.points[i].y * axis.y; 163 | if (tmp > maxB) 164 | maxB = tmp; 165 | else if (tmp < minB) 166 | minB = tmp; 167 | } 168 | /* correct for offset */ 169 | tmp = other.center.x * axis.x + other.center.y * axis.y; 170 | minB += tmp; 171 | maxB += tmp; 172 | 173 | /* test if lines intersect, if not, return false */ 174 | if (maxA < minB || minA > maxB) { 175 | return false; 176 | } else { 
177 | var o = (maxA > maxB ? maxB - minA : maxA - minB); 178 | if (o < overlap) { 179 | overlap = o; 180 | smallest = {x: axis.x, y: axis.y}; 181 | } 182 | } 183 | } 184 | 185 | /* test polygon B's sides */ 186 | for (side = 0; side < other.getNumberOfSides(); side++) 187 | { 188 | /* get the axis that we will project onto */ 189 | if (side == 0) 190 | { 191 | axis.x = other.points[other.getNumberOfSides() - 1].y - other.points[0].y; 192 | axis.y = other.points[0].x - other.points[other.getNumberOfSides() - 1].x; 193 | } 194 | else 195 | { 196 | axis.x = other.points[side - 1].y - other.points[side].y; 197 | axis.y = other.points[side].x - other.points[side - 1].x; 198 | } 199 | 200 | /* normalize the axis */ 201 | tmp = Math.sqrt(axis.x * axis.x + axis.y * axis.y); 202 | axis.x /= tmp; 203 | axis.y /= tmp; 204 | 205 | /* project polygon A onto axis to determine the min/max */ 206 | minA = maxA = this.points[0].x * axis.x + this.points[0].y * axis.y; 207 | for (i = 1; i < this.getNumberOfSides(); i++) 208 | { 209 | tmp = this.points[i].x * axis.x + this.points[i].y * axis.y; 210 | if (tmp > maxA) 211 | maxA = tmp; 212 | else if (tmp < minA) 213 | minA = tmp; 214 | } 215 | /* correct for offset */ 216 | tmp = this.center.x * axis.x + this.center.y * axis.y; 217 | minA += tmp; 218 | maxA += tmp; 219 | 220 | /* project polygon B onto axis to determine the min/max */ 221 | minB = maxB = other.points[0].x * axis.x + other.points[0].y * axis.y; 222 | for (i = 1; i < other.getNumberOfSides(); i++) 223 | { 224 | tmp = other.points[i].x * axis.x + other.points[i].y * axis.y; 225 | if (tmp > maxB) 226 | maxB = tmp; 227 | else if (tmp < minB) 228 | minB = tmp; 229 | } 230 | /* correct for offset */ 231 | tmp = other.center.x * axis.x + other.center.y * axis.y; 232 | minB += tmp; 233 | maxB += tmp; 234 | 235 | /* test if lines intersect, if not, return false */ 236 | if (maxA < minB || minA > maxB) { 237 | return false; 238 | } else { 239 | var o = (maxA > maxB ? 
maxB - minA : maxA - minB); 240 | if (o < overlap) { 241 | overlap = o; 242 | smallest = {x: axis.x, y: axis.y}; 243 | } 244 | } 245 | } 246 | 247 | return {"overlap": overlap + 0.001, "axis": smallest}; 248 | 249 | } 250 | -------------------------------------------------------------------------------- /docs/moment.min.js: -------------------------------------------------------------------------------- 1 | //! moment.js 2 | //! version : 2.3.1 3 | //! authors : Tim Wood, Iskren Chernev, Moment.js contributors 4 | //! license : MIT 5 | //! momentjs.com 6 | (function(a){function b(a,b){return function(c){return i(a.call(this,c),b)}}function c(a,b){return function(c){return this.lang().ordinal(a.call(this,c),b)}}function d(){}function e(a){u(a),g(this,a)}function f(a){var b=o(a),c=b.year||0,d=b.month||0,e=b.week||0,f=b.day||0,g=b.hour||0,h=b.minute||0,i=b.second||0,j=b.millisecond||0;this._input=a,this._milliseconds=+j+1e3*i+6e4*h+36e5*g,this._days=+f+7*e,this._months=+d+12*c,this._data={},this._bubble()}function g(a,b){for(var c in b)b.hasOwnProperty(c)&&(a[c]=b[c]);return b.hasOwnProperty("toString")&&(a.toString=b.toString),b.hasOwnProperty("valueOf")&&(a.valueOf=b.valueOf),a}function h(a){return 0>a?Math.ceil(a):Math.floor(a)}function i(a,b){for(var c=a+"";c.lengthd;d++)(c&&a[d]!==b[d]||!c&&q(a[d])!==q(b[d]))&&g++;return g+f}function n(a){if(a){var b=a.toLowerCase().replace(/(.)s$/,"$1");a=Jb[a]||Kb[b]||b}return a}function o(a){var b,c,d={};for(c in a)a.hasOwnProperty(c)&&(b=n(c),b&&(d[b]=a[c]));return d}function p(b){var c,d;if(0===b.indexOf("week"))c=7,d="day";else{if(0!==b.indexOf("month"))return;c=12,d="month"}bb[b]=function(e,f){var g,h,i=bb.fn._lang[b],j=[];if("number"==typeof e&&(f=e,e=a),h=function(a){var b=bb().utc().set(d,a);return i.call(bb.fn._lang,b,e||"")},null!=f)return h(f);for(g=0;c>g;g++)j.push(h(g));return j}}function q(a){var b=+a,c=0;return 0!==b&&isFinite(b)&&(c=b>=0?Math.floor(b):Math.ceil(b)),c}function r(a,b){return new 
Date(Date.UTC(a,b+1,0)).getUTCDate()}function s(a){return t(a)?366:365}function t(a){return 0===a%4&&0!==a%100||0===a%400}function u(a){var b;a._a&&-2===a._pf.overflow&&(b=a._a[gb]<0||a._a[gb]>11?gb:a._a[hb]<1||a._a[hb]>r(a._a[fb],a._a[gb])?hb:a._a[ib]<0||a._a[ib]>23?ib:a._a[jb]<0||a._a[jb]>59?jb:a._a[kb]<0||a._a[kb]>59?kb:a._a[lb]<0||a._a[lb]>999?lb:-1,a._pf._overflowDayOfYear&&(fb>b||b>hb)&&(b=hb),a._pf.overflow=b)}function v(a){a._pf={empty:!1,unusedTokens:[],unusedInput:[],overflow:-2,charsLeftOver:0,nullInput:!1,invalidMonth:null,invalidFormat:!1,userInvalidated:!1}}function w(a){return null==a._isValid&&(a._isValid=!isNaN(a._d.getTime())&&a._pf.overflow<0&&!a._pf.empty&&!a._pf.invalidMonth&&!a._pf.nullInput&&!a._pf.invalidFormat&&!a._pf.userInvalidated,a._strict&&(a._isValid=a._isValid&&0===a._pf.charsLeftOver&&0===a._pf.unusedTokens.length)),a._isValid}function x(a){return a?a.toLowerCase().replace("_","-"):a}function y(a,b){return b.abbr=a,mb[a]||(mb[a]=new d),mb[a].set(b),mb[a]}function z(a){delete mb[a]}function A(a){var b,c,d,e,f=0,g=function(a){if(!mb[a]&&nb)try{require("./lang/"+a)}catch(b){}return mb[a]};if(!a)return bb.fn._lang;if(!k(a)){if(c=g(a))return c;a=[a]}for(;f0;){if(c=g(e.slice(0,b).join("-")))return c;if(d&&d.length>=b&&m(e,d,!0)>=b-1)break;b--}f++}return bb.fn._lang}function B(a){return a.match(/\[[\s\S]/)?a.replace(/^\[|\]$/g,""):a.replace(/\\/g,"")}function C(a){var b,c,d=a.match(rb);for(b=0,c=d.length;c>b;b++)d[b]=Ob[d[b]]?Ob[d[b]]:B(d[b]);return function(e){var f="";for(b=0;c>b;b++)f+=d[b]instanceof Function?d[b].call(e,a):d[b];return f}}function D(a,b){return a.isValid()?(b=E(b,a.lang()),Lb[b]||(Lb[b]=C(b)),Lb[b](a)):a.lang().invalidDate()}function E(a,b){function c(a){return b.longDateFormat(a)||a}var d=5;for(sb.lastIndex=0;d>=0&&sb.test(a);)a=a.replace(sb,c),sb.lastIndex=0,d-=1;return a}function F(a,b){var c;switch(a){case"DDDD":return vb;case"YYYY":case"GGGG":case"gggg":return wb;case"YYYYY":case"GGGGG":case"ggggg":return 
xb;case"S":case"SS":case"SSS":case"DDD":return ub;case"MMM":case"MMMM":case"dd":case"ddd":case"dddd":return yb;case"a":case"A":return A(b._l)._meridiemParse;case"X":return Bb;case"Z":case"ZZ":return zb;case"T":return Ab;case"MM":case"DD":case"YY":case"GG":case"gg":case"HH":case"hh":case"mm":case"ss":case"M":case"D":case"d":case"H":case"h":case"m":case"s":case"w":case"ww":case"W":case"WW":case"e":case"E":return tb;default:return c=new RegExp(N(M(a.replace("\\","")),"i"))}}function G(a){var b=(zb.exec(a)||[])[0],c=(b+"").match(Gb)||["-",0,0],d=+(60*c[1])+q(c[2]);return"+"===c[0]?-d:d}function H(a,b,c){var d,e=c._a;switch(a){case"M":case"MM":null!=b&&(e[gb]=q(b)-1);break;case"MMM":case"MMMM":d=A(c._l).monthsParse(b),null!=d?e[gb]=d:c._pf.invalidMonth=b;break;case"D":case"DD":null!=b&&(e[hb]=q(b));break;case"DDD":case"DDDD":null!=b&&(c._dayOfYear=q(b));break;case"YY":e[fb]=q(b)+(q(b)>68?1900:2e3);break;case"YYYY":case"YYYYY":e[fb]=q(b);break;case"a":case"A":c._isPm=A(c._l).isPM(b);break;case"H":case"HH":case"h":case"hh":e[ib]=q(b);break;case"m":case"mm":e[jb]=q(b);break;case"s":case"ss":e[kb]=q(b);break;case"S":case"SS":case"SSS":e[lb]=q(1e3*("0."+b));break;case"X":c._d=new Date(1e3*parseFloat(b));break;case"Z":case"ZZ":c._useUTC=!0,c._tzm=G(b);break;case"w":case"ww":case"W":case"WW":case"d":case"dd":case"ddd":case"dddd":case"e":case"E":a=a.substr(0,1);case"gg":case"gggg":case"GG":case"GGGG":case"GGGGG":a=a.substr(0,2),b&&(c._w=c._w||{},c._w[a]=b)}}function I(a){var b,c,d,e,f,g,h,i,j,k,l=[];if(!a._d){for(d=K(a),a._w&&null==a._a[hb]&&null==a._a[gb]&&(f=function(b){return 
b?b.length<3?parseInt(b,10)>68?"19"+b:"20"+b:b:null==a._a[fb]?bb().weekYear():a._a[fb]},g=a._w,null!=g.GG||null!=g.W||null!=g.E?h=X(f(g.GG),g.W||1,g.E,4,1):(i=A(a._l),j=null!=g.d?T(g.d,i):null!=g.e?parseInt(g.e,10)+i._week.dow:0,k=parseInt(g.w,10)||1,null!=g.d&&js(e)&&(a._pf._overflowDayOfYear=!0),c=S(e,0,a._dayOfYear),a._a[gb]=c.getUTCMonth(),a._a[hb]=c.getUTCDate()),b=0;3>b&&null==a._a[b];++b)a._a[b]=l[b]=d[b];for(;7>b;b++)a._a[b]=l[b]=null==a._a[b]?2===b?1:0:a._a[b];l[ib]+=q((a._tzm||0)/60),l[jb]+=q((a._tzm||0)%60),a._d=(a._useUTC?S:R).apply(null,l)}}function J(a){var b;a._d||(b=o(a._i),a._a=[b.year,b.month,b.day,b.hour,b.minute,b.second,b.millisecond],I(a))}function K(a){var b=new Date;return a._useUTC?[b.getUTCFullYear(),b.getUTCMonth(),b.getUTCDate()]:[b.getFullYear(),b.getMonth(),b.getDate()]}function L(a){a._a=[],a._pf.empty=!0;var b,c,d,e,f,g=A(a._l),h=""+a._i,i=h.length,j=0;for(d=E(a._f,g).match(rb)||[],b=0;b0&&a._pf.unusedInput.push(f),h=h.slice(h.indexOf(c)+c.length),j+=c.length),Ob[e]?(c?a._pf.empty=!1:a._pf.unusedTokens.push(e),H(e,c,a)):a._strict&&!c&&a._pf.unusedTokens.push(e);a._pf.charsLeftOver=i-j,h.length>0&&a._pf.unusedInput.push(h),a._isPm&&a._a[ib]<12&&(a._a[ib]+=12),a._isPm===!1&&12===a._a[ib]&&(a._a[ib]=0),I(a),u(a)}function M(a){return a.replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(a,b,c,d,e){return b||c||d||e})}function N(a){return a.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function O(a){var b,c,d,e,f;if(0===a._f.length)return a._pf.invalidFormat=!0,a._d=new Date(0/0),void 0;for(e=0;ef)&&(d=f,c=b));g(a,c||b)}function P(a){var b,c=a._i,d=Cb.exec(c);if(d){for(b=4;b>0;b--)if(d[b]){a._f=Eb[b-1]+(d[6]||" ");break}for(b=0;4>b;b++)if(Fb[b][1].exec(c)){a._f+=Fb[b][0];break}zb.exec(c)&&(a._f+=" Z"),L(a)}else a._d=new Date(c)}function Q(b){var c=b._i,d=ob.exec(c);c===a?b._d=new Date:d?b._d=new Date(+d[1]):"string"==typeof c?P(b):k(c)?(b._a=c.slice(0),I(b)):l(c)?b._d=new Date(+c):"object"==typeof c?J(b):b._d=new Date(c)}function 
R(a,b,c,d,e,f,g){var h=new Date(a,b,c,d,e,f,g);return 1970>a&&h.setFullYear(a),h}function S(a){var b=new Date(Date.UTC.apply(null,arguments));return 1970>a&&b.setUTCFullYear(a),b}function T(a,b){if("string"==typeof a)if(isNaN(a)){if(a=b.weekdaysParse(a),"number"!=typeof a)return null}else a=parseInt(a,10);return a}function U(a,b,c,d,e){return e.relativeTime(b||1,!!c,a,d)}function V(a,b,c){var d=eb(Math.abs(a)/1e3),e=eb(d/60),f=eb(e/60),g=eb(f/24),h=eb(g/365),i=45>d&&["s",d]||1===e&&["m"]||45>e&&["mm",e]||1===f&&["h"]||22>f&&["hh",f]||1===g&&["d"]||25>=g&&["dd",g]||45>=g&&["M"]||345>g&&["MM",eb(g/30)]||1===h&&["y"]||["yy",h];return i[2]=b,i[3]=a>0,i[4]=c,U.apply({},i)}function W(a,b,c){var d,e=c-b,f=c-a.day();return f>e&&(f-=7),e-7>f&&(f+=7),d=bb(a).add("d",f),{week:Math.ceil(d.dayOfYear()/7),year:d.year()}}function X(a,b,c,d,e){var f,g,h=new Date(Date.UTC(a,0)).getUTCDay();return c=null!=c?c:e,f=e-h+(h>d?7:0),g=7*(b-1)+(c-e)+f+1,{year:g>0?a:a-1,dayOfYear:g>0?g:s(a-1)+g}}function Y(a){var b=a._i,c=a._f;return"undefined"==typeof a._pf&&v(a),null===b?bb.invalid({nullInput:!0}):("string"==typeof b&&(a._i=b=A().preparse(b)),bb.isMoment(b)?(a=g({},b),a._d=new Date(+b._d)):c?k(c)?O(a):L(a):Q(a),new e(a))}function Z(a,b){bb.fn[a]=bb.fn[a+"s"]=function(a){var c=this._isUTC?"UTC":"";return null!=a?(this._d["set"+c+b](a),bb.updateOffset(this),this):this._d["get"+c+b]()}}function $(a){bb.duration.fn[a]=function(){return this._data[a]}}function _(a,b){bb.duration.fn["as"+a]=function(){return+this/b}}function ab(){"undefined"==typeof ender&&(this.moment=bb)}for(var bb,cb,db="2.3.1",eb=Math.round,fb=0,gb=1,hb=2,ib=3,jb=4,kb=5,lb=6,mb={},nb="undefined"!=typeof 
module&&module.exports,ob=/^\/?Date\((\-?\d+)/i,pb=/(\-)?(?:(\d*)\.)?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?)?/,qb=/^(-)?P(?:(?:([0-9,.]*)Y)?(?:([0-9,.]*)M)?(?:([0-9,.]*)D)?(?:T(?:([0-9,.]*)H)?(?:([0-9,.]*)M)?(?:([0-9,.]*)S)?)?|([0-9,.]*)W)$/,rb=/(\[[^\[]*\])|(\\)?(Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|mm?|ss?|SS?S?|X|zz?|ZZ?|.)/g,sb=/(\[[^\[]*\])|(\\)?(LT|LL?L?L?|l{1,4})/g,tb=/\d\d?/,ub=/\d{1,3}/,vb=/\d{3}/,wb=/\d{1,4}/,xb=/[+\-]?\d{1,6}/,yb=/[0-9]*['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]+|[\u0600-\u06FF\/]+(\s*?[\u0600-\u06FF]+){1,2}/i,zb=/Z|[\+\-]\d\d:?\d\d/i,Ab=/T/i,Bb=/[\+\-]?\d+(\.\d{1,3})?/,Cb=/^\s*\d{4}-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d\d?\d?)?)?)?)?([\+\-]\d\d:?\d\d)?)?$/,Db="YYYY-MM-DDTHH:mm:ssZ",Eb=["YYYY-MM-DD","GGGG-[W]WW","GGGG-[W]WW-E","YYYY-DDD"],Fb=[["HH:mm:ss.S",/(T| )\d\d:\d\d:\d\d\.\d{1,3}/],["HH:mm:ss",/(T| )\d\d:\d\d:\d\d/],["HH:mm",/(T| )\d\d:\d\d/],["HH",/(T| )\d\d/]],Gb=/([\+\-]|\d\d)/gi,Hb="Date|Hours|Minutes|Seconds|Milliseconds".split("|"),Ib={Milliseconds:1,Seconds:1e3,Minutes:6e4,Hours:36e5,Days:864e5,Months:2592e6,Years:31536e6},Jb={ms:"millisecond",s:"second",m:"minute",h:"hour",d:"day",D:"date",w:"week",W:"isoWeek",M:"month",y:"year",DDD:"dayOfYear",e:"weekday",E:"isoWeekday",gg:"weekYear",GG:"isoWeekYear"},Kb={dayofyear:"dayOfYear",isoweekday:"isoWeekday",isoweek:"isoWeek",weekyear:"weekYear",isoweekyear:"isoWeekYear"},Lb={},Mb="DDD w W M D d".split(" "),Nb="M D H h m s w W".split(" "),Ob={M:function(){return this.month()+1},MMM:function(a){return this.lang().monthsShort(this,a)},MMMM:function(a){return this.lang().months(this,a)},D:function(){return this.date()},DDD:function(){return this.dayOfYear()},d:function(){return this.day()},dd:function(a){return this.lang().weekdaysMin(this,a)},ddd:function(a){return this.lang().weekdaysShort(this,a)},dddd:function(a){return this.lang().weekdays(this,a)},w:function(){return 
this.week()},W:function(){return this.isoWeek()},YY:function(){return i(this.year()%100,2)},YYYY:function(){return i(this.year(),4)},YYYYY:function(){return i(this.year(),5)},gg:function(){return i(this.weekYear()%100,2)},gggg:function(){return this.weekYear()},ggggg:function(){return i(this.weekYear(),5)},GG:function(){return i(this.isoWeekYear()%100,2)},GGGG:function(){return this.isoWeekYear()},GGGGG:function(){return i(this.isoWeekYear(),5)},e:function(){return this.weekday()},E:function(){return this.isoWeekday()},a:function(){return this.lang().meridiem(this.hours(),this.minutes(),!0)},A:function(){return this.lang().meridiem(this.hours(),this.minutes(),!1)},H:function(){return this.hours()},h:function(){return this.hours()%12||12},m:function(){return this.minutes()},s:function(){return this.seconds()},S:function(){return q(this.milliseconds()/100)},SS:function(){return i(q(this.milliseconds()/10),2)},SSS:function(){return i(this.milliseconds(),3)},Z:function(){var a=-this.zone(),b="+";return 0>a&&(a=-a,b="-"),b+i(q(a/60),2)+":"+i(q(a)%60,2)},ZZ:function(){var a=-this.zone(),b="+";return 0>a&&(a=-a,b="-"),b+i(q(10*a/6),4)},z:function(){return this.zoneAbbr()},zz:function(){return this.zoneName()},X:function(){return this.unix()}},Pb=["months","monthsShort","weekdays","weekdaysShort","weekdaysMin"];Mb.length;)cb=Mb.pop(),Ob[cb+"o"]=c(Ob[cb],cb);for(;Nb.length;)cb=Nb.pop(),Ob[cb+cb]=b(Ob[cb],2);for(Ob.DDDD=b(Ob.DDD,3),g(d.prototype,{set:function(a){var b,c;for(c in a)b=a[c],"function"==typeof b?this[c]=b:this["_"+c]=b},_months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),months:function(a){return this._months[a.month()]},_monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),monthsShort:function(a){return this._monthsShort[a.month()]},monthsParse:function(a){var 
b,c,d;for(this._monthsParse||(this._monthsParse=[]),b=0;12>b;b++)if(this._monthsParse[b]||(c=bb.utc([2e3,b]),d="^"+this.months(c,"")+"|^"+this.monthsShort(c,""),this._monthsParse[b]=new RegExp(d.replace(".",""),"i")),this._monthsParse[b].test(a))return b},_weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdays:function(a){return this._weekdays[a.day()]},_weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysShort:function(a){return this._weekdaysShort[a.day()]},_weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),weekdaysMin:function(a){return this._weekdaysMin[a.day()]},weekdaysParse:function(a){var b,c,d;for(this._weekdaysParse||(this._weekdaysParse=[]),b=0;7>b;b++)if(this._weekdaysParse[b]||(c=bb([2e3,1]).day(b),d="^"+this.weekdays(c,"")+"|^"+this.weekdaysShort(c,"")+"|^"+this.weekdaysMin(c,""),this._weekdaysParse[b]=new RegExp(d.replace(".",""),"i")),this._weekdaysParse[b].test(a))return b},_longDateFormat:{LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D YYYY",LLL:"MMMM D YYYY LT",LLLL:"dddd, MMMM D YYYY LT"},longDateFormat:function(a){var b=this._longDateFormat[a];return!b&&this._longDateFormat[a.toUpperCase()]&&(b=this._longDateFormat[a.toUpperCase()].replace(/MMMM|MM|DD|dddd/g,function(a){return a.slice(1)}),this._longDateFormat[a]=b),b},isPM:function(a){return"p"===(a+"").toLowerCase().charAt(0)},_meridiemParse:/[ap]\.?m?\.?/i,meridiem:function(a,b,c){return a>11?c?"pm":"PM":c?"am":"AM"},_calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},calendar:function(a,b){var c=this._calendar[a];return"function"==typeof c?c.apply(b):c},_relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},relativeTime:function(a,b,c,d){var e=this._relativeTime[c];return"function"==typeof 
e?e(a,b,c,d):e.replace(/%d/i,a)},pastFuture:function(a,b){var c=this._relativeTime[a>0?"future":"past"];return"function"==typeof c?c(b):c.replace(/%s/i,b)},ordinal:function(a){return this._ordinal.replace("%d",a)},_ordinal:"%d",preparse:function(a){return a},postformat:function(a){return a},week:function(a){return W(a,this._week.dow,this._week.doy).week},_week:{dow:0,doy:6},_invalidDate:"Invalid date",invalidDate:function(){return this._invalidDate}}),bb=function(b,c,d,e){return"boolean"==typeof d&&(e=d,d=a),Y({_i:b,_f:c,_l:d,_strict:e,_isUTC:!1})},bb.utc=function(b,c,d,e){var f;return"boolean"==typeof d&&(e=d,d=a),f=Y({_useUTC:!0,_isUTC:!0,_l:d,_i:b,_f:c,_strict:e}).utc()},bb.unix=function(a){return bb(1e3*a)},bb.duration=function(a,b){var c,d,e,g=bb.isDuration(a),h="number"==typeof a,i=g?a._input:h?{}:a,j=null;return h?b?i[b]=a:i.milliseconds=a:(j=pb.exec(a))?(c="-"===j[1]?-1:1,i={y:0,d:q(j[hb])*c,h:q(j[ib])*c,m:q(j[jb])*c,s:q(j[kb])*c,ms:q(j[lb])*c}):(j=qb.exec(a))&&(c="-"===j[1]?-1:1,e=function(a){var b=a&&parseFloat(a.replace(",","."));return(isNaN(b)?0:b)*c},i={y:e(j[2]),M:e(j[3]),d:e(j[4]),h:e(j[5]),m:e(j[6]),s:e(j[7]),w:e(j[8])}),d=new f(i),g&&a.hasOwnProperty("_lang")&&(d._lang=a._lang),d},bb.version=db,bb.defaultFormat=Db,bb.updateOffset=function(){},bb.lang=function(a,b){var c;return a?(b?y(x(a),b):null===b?(z(a),a="en"):mb[a]||A(a),c=bb.duration.fn._lang=bb.fn._lang=A(a),c._abbr):bb.fn._lang._abbr},bb.langData=function(a){return a&&a._lang&&a._lang._abbr&&(a=a._lang._abbr),A(a)},bb.isMoment=function(a){return a instanceof e},bb.isDuration=function(a){return a instanceof f},cb=Pb.length-1;cb>=0;--cb)p(Pb[cb]);for(bb.normalizeUnits=function(a){return n(a)},bb.invalid=function(a){var b=bb.utc(0/0);return null!=a?g(b._pf,a):b._pf.userInvalidated=!0,b},bb.parseZone=function(a){return bb(a).parseZone()},g(bb.fn=e.prototype,{clone:function(){return bb(this)},valueOf:function(){return+this._d+6e4*(this._offset||0)},unix:function(){return 
Math.floor(+this/1e3)},toString:function(){return this.clone().lang("en").format("ddd MMM DD YYYY HH:mm:ss [GMT]ZZ")},toDate:function(){return this._offset?new Date(+this):this._d},toISOString:function(){return D(bb(this).utc(),"YYYY-MM-DD[T]HH:mm:ss.SSS[Z]")},toArray:function(){var a=this;return[a.year(),a.month(),a.date(),a.hours(),a.minutes(),a.seconds(),a.milliseconds()]},isValid:function(){return w(this)},isDSTShifted:function(){return this._a?this.isValid()&&m(this._a,(this._isUTC?bb.utc(this._a):bb(this._a)).toArray())>0:!1},parsingFlags:function(){return g({},this._pf)},invalidAt:function(){return this._pf.overflow},utc:function(){return this.zone(0)},local:function(){return this.zone(0),this._isUTC=!1,this},format:function(a){var b=D(this,a||bb.defaultFormat);return this.lang().postformat(b)},add:function(a,b){var c;return c="string"==typeof a?bb.duration(+b,a):bb.duration(a,b),j(this,c,1),this},subtract:function(a,b){var c;return c="string"==typeof a?bb.duration(+b,a):bb.duration(a,b),j(this,c,-1),this},diff:function(a,b,c){var d,e,f=this._isUTC?bb(a).zone(this._offset||0):bb(a).local(),g=6e4*(this.zone()-f.zone());return b=n(b),"year"===b||"month"===b?(d=432e5*(this.daysInMonth()+f.daysInMonth()),e=12*(this.year()-f.year())+(this.month()-f.month()),e+=(this-bb(this).startOf("month")-(f-bb(f).startOf("month")))/d,e-=6e4*(this.zone()-bb(this).startOf("month").zone()-(f.zone()-bb(f).startOf("month").zone()))/d,"year"===b&&(e/=12)):(d=this-f,e="second"===b?d/1e3:"minute"===b?d/6e4:"hour"===b?d/36e5:"day"===b?(d-g)/864e5:"week"===b?(d-g)/6048e5:d),c?e:h(e)},from:function(a,b){return bb.duration(this.diff(a)).lang(this.lang()._abbr).humanize(!b)},fromNow:function(a){return this.from(bb(),a)},calendar:function(){var a=this.diff(bb().zone(this.zone()).startOf("day"),"days",!0),b=-6>a?"sameElse":-1>a?"lastWeek":0>a?"lastDay":1>a?"sameDay":2>a?"nextDay":7>a?"nextWeek":"sameElse";return this.format(this.lang().calendar(b,this))},isLeapYear:function(){return 
t(this.year())},isDST:function(){return this.zone()+bb(a).startOf(b)},isBefore:function(a,b){return b="undefined"!=typeof b?b:"millisecond",+this.clone().startOf(b)<+bb(a).startOf(b)},isSame:function(a,b){return b="undefined"!=typeof b?b:"millisecond",+this.clone().startOf(b)===+bb(a).startOf(b)},min:function(a){return a=bb.apply(null,arguments),this>a?this:a},max:function(a){return a=bb.apply(null,arguments),a>this?this:a},zone:function(a){var b=this._offset||0;return null==a?this._isUTC?b:this._d.getTimezoneOffset():("string"==typeof a&&(a=G(a)),Math.abs(a)<16&&(a=60*a),this._offset=a,this._isUTC=!0,b!==a&&j(this,bb.duration(b-a,"m"),1,!0),this)},zoneAbbr:function(){return this._isUTC?"UTC":""},zoneName:function(){return this._isUTC?"Coordinated Universal Time":""},parseZone:function(){return"string"==typeof this._i&&this.zone(this._i),this},hasAlignedHourOffset:function(a){return a=a?bb(a).zone():0,0===(this.zone()-a)%60},daysInMonth:function(){return r(this.year(),this.month())},dayOfYear:function(a){var b=eb((bb(this).startOf("day")-bb(this).startOf("year"))/864e5)+1;return null==a?b:this.add("d",a-b)},weekYear:function(a){var b=W(this,this.lang()._week.dow,this.lang()._week.doy).year;return null==a?b:this.add("y",a-b)},isoWeekYear:function(a){var b=W(this,1,4).year;return null==a?b:this.add("y",a-b)},week:function(a){var b=this.lang().week(this);return null==a?b:this.add("d",7*(a-b))},isoWeek:function(a){var b=W(this,1,4).week;return null==a?b:this.add("d",7*(a-b))},weekday:function(a){var b=(this.day()+7-this.lang()._week.dow)%7;return null==a?b:this.add("d",a-b)},isoWeekday:function(a){return null==a?this.day()||7:this.day(this.day()%7?a:a-7)},get:function(a){return a=n(a),this[a]()},set:function(a,b){return a=n(a),"function"==typeof this[a]&&this[a](b),this},lang:function(b){return b===a?this._lang:(this._lang=A(b),this)}}),cb=0;cb li { 10 | cursor: pointer; 11 | } 12 | 13 | .nav > li > a { 14 | padding: 10px 15px; 15 | display: block; 16 | position: 
relative; 17 | } 18 | 19 | .nav > li.active > a { 20 | color: #555; 21 | cursor: default; 22 | background-color: #fff; 23 | border: 1px solid #ddd; 24 | border-bottom-color: transparent; 25 | border-top-right-radius: 4px; 26 | border-top-left-radius: 4px; 27 | } 28 | 29 | h2 { 30 | margin: 80px 0 40px 0; 31 | } 32 | 33 | #mainContentContainer { 34 | padding: 0px 30px; 35 | margin: 0px auto; 36 | width: 1200px; 37 | display: block; 38 | } 39 | 40 | .btn { 41 | cursor: pointer; 42 | } 43 | 44 | .demoAreaContainer { 45 | width: 1140px; 46 | display: block; 47 | } 48 | 49 | .bigCanvasContainerWrapper { 50 | width: 1140px; 51 | float: left; 52 | margin-bottom: 20px; 53 | } 54 | 55 | .bigCanvasContainer { 56 | width: 910px; 57 | margin: auto; 58 | } 59 | 60 | .prevBtn { 61 | float: left; 62 | margin-left: 35px; 63 | } 64 | 65 | .nextBtn { 66 | float: right; 67 | margin-right: 35px; 68 | } 69 | 70 | .move { 71 | cursor: move; 72 | } 73 | 74 | #lineSvgTag { 75 | pointer-events: none; 76 | } 77 | 78 | .btn { 79 | font-size: 12px; 80 | } 81 | 82 | #triangleOutputList { 83 | max-width: 400px; 84 | height: 320px; 85 | overflow: auto; 86 | margin-top: 20px; 87 | } 88 | 89 | .canvasWrapperLeft { 90 | display: block; 91 | /*width: 285px;*/ 92 | float: left; 93 | margin-top: 35px; 94 | margin-bottom: 40px; 95 | margin-right: 68px; 96 | position: relative; 97 | } 98 | 99 | .canvasWrapperRight { 100 | display: block; 101 | /*width: 285px;*/ 102 | float: right; 103 | margin-top: 35px; 104 | margin-bottom: 40px; 105 | position: relative; 106 | } 107 | 108 | .trianglesListWrapper { 109 | margin-left: 40px; 110 | height: 260px; 111 | width: 400px; 112 | display: inline-block; 113 | float: right; 114 | } 115 | 116 | .distanceOutputHelpMessage { 117 | padding: 8px; 118 | text-align: center; 119 | } 120 | 121 | .trianglesListInnerWrapper { 122 | height: 230px; 123 | display: block; 124 | overflow: auto; 125 | border-bottom: 1px solid rgba(0, 0, 0, 0.19); 126 | background-color: #fff; 127 | 
} 128 | 129 | .table-striped > tbody > tr:nth-child(2n+1) > td, .table-striped > tbody > tr:nth-child(2n+1) > th { 130 | background-color: rgba(0, 0, 0, 0.01); 131 | } 132 | 133 | .triangleListBody { 134 | overflow: auto; 135 | } 136 | 137 | .header { 138 | position: relative; 139 | background-color: #183D4E; 140 | width: 100%; 141 | min-width: 1200px; 142 | color: #ffffff; 143 | overflow: hidden; 144 | font-family: "Roboto", "Helvetica", "Arial", sans-serif; 145 | text-align: center; 146 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); 147 | border-bottom: solid 1px rgba(0, 0, 0, 0.4); 148 | } 149 | 150 | .headerInner { 151 | margin: 35px auto; 152 | width: 1140px; 153 | } 154 | 155 | .headerInner h1 { 156 | font-size: 32px; 157 | } 158 | 159 | .buttonsWrapper { 160 | width: 100%; 161 | min-width: 1200px; 162 | border-bottom: 1px solid #ddd; 163 | box-shadow: 0 1px 4px rgba(0, 0, 0, 0.08); 164 | } 165 | 166 | .buttonsInner { 167 | margin: 8px auto 0px auto; 168 | padding: 2px 30px; 169 | width: 1200px; 170 | } 171 | 172 | .canvasTitle { 173 | color: rgba(0, 0, 0, .8); 174 | display: block; 175 | font-size: 12px; 176 | margin-bottom: 5px; 177 | padding-bottom: 0px; 178 | } 179 | 180 | .canvasTitleRight { 181 | position: absolute; 182 | top: 0px; 183 | right: 0px; 184 | font-size: 12px; 185 | } 186 | 187 | .triangleCountOutput { 188 | color: rgba(0, 0, 0, .8); 189 | display: block; 190 | font-size: 12px; 191 | margin: 0px; 192 | padding: 0px; 193 | margin-top: 3px; 194 | } 195 | 196 | #output { 197 | width: 1100px; 198 | margin: auto; 199 | } 200 | 201 | .aboutAreaWrapper { 202 | border-top: 1px solid rgba(0, 0, 0, 0.08); 203 | box-shadow: 0 0px 4px rgba(0, 0, 0, 0.1); 204 | margin: auto; 205 | width: 100%; 206 | min-width: 1200px; 207 | box-shadow: 0 0px 4px rgba(0, 0, 0, 0.1); 208 | } 209 | 210 | .aboutArea { 211 | font-family: "Roboto", "Helvetica", "Arial", sans-serif; 212 | color: rgba(0, 0, 0, 0.7); 213 | width: 700px; 214 | padding: 80px 0px; 215 | margin: auto; 
216 | } 217 | 218 | .smallCanvasWrapper { 219 | /*width: 750px;*/ 220 | height: 200px; 221 | } 222 | 223 | #fragmentCanvas2 { 224 | 225 | float: right; 226 | } 227 | 228 | .distanceOutput { 229 | width: 308px; 230 | float: left; 231 | } 232 | 233 | .distanceOutputTitle { 234 | padding-top: 30px; 235 | font-size: 16px; 236 | color: rgba(0, 0, 0, .87); 237 | text-align: center; 238 | width: 100%; 239 | } 240 | 241 | .distanceOutputData { 242 | width: 100%; 243 | text-align: center; 244 | font-size: 20px; 245 | color: black; 246 | } 247 | 248 | .valid { 249 | color: #2c662e; 250 | } 251 | 252 | .invalid { 253 | color: #f44336; 254 | } 255 | 256 | .fragmentCanvasWrapper { 257 | /*width: 165px;*/ 258 | float: left; 259 | } 260 | 261 | .mainContentContainerWrapper { 262 | -webkit-touch-callout: none; 263 | -webkit-user-select: none; 264 | -khtml-user-select: none; 265 | -moz-user-select: none; 266 | -ms-user-select: none; 267 | user-select: none; 268 | background-color: #f7f7f7; 269 | width: 100%; 270 | min-width: 1200px; 271 | padding-bottom: 60px; 272 | height: 700px; 273 | } 274 | 275 | .transformationsLabel { 276 | font-size: 12px; 277 | } 278 | 279 | .selectedTriangleTR { 280 | background-color: rgba(24, 61, 78, .9); 281 | color: white; 282 | } 283 | 284 | .canvasBottom { 285 | position: absolute; 286 | } 287 | 288 | .canvasTop { 289 | position: absolute; 290 | } 291 | 292 | .canvasClickThrough { 293 | position: absolute; 294 | pointer-events: none; 295 | } 296 | 297 | .twoCanvasWrapper { 298 | } 299 | 300 | .interactiveCanvasWrapper { 301 | width: 280px; 302 | height: 280px; 303 | cursor: pointer; 304 | touch-action: none; 305 | } 306 | 307 | .referenceCanvasWrapper { 308 | width: 560px; 309 | height: 280px; 310 | } 311 | 312 | .canvasShadow { 313 | /*border: 1px solid black;*/ 314 | box-shadow: 0px 0px 8px 2px rgba(24, 61, 78, .2); 315 | } 316 | 317 | .moreWrapper { 318 | position: relative; 319 | width: 100%; 320 | min-width: 1200px; 321 | } 322 | 323 | .more { 
324 | position: absolute; 325 | left: 50%; 326 | top: 50%; 327 | } 328 | 329 | .moreButton { 330 | position: absolute; 331 | border-radius: 50%; 332 | width: 56px; 333 | height: 56px; 334 | box-shadow: 0 4px 5px 0 rgba(0, 0, 0, .14), 0 1px 10px 0 rgba(0, 0, 0, .12), 0 2px 4px -1px rgba(0, 0, 0, .2); 335 | border: 0px; 336 | background-color: white; 337 | top: -28px; 338 | left: -28px; 339 | } 340 | 341 | body:after { 342 | display: none; 343 | } 344 | 345 | .dropdown-menu li { 346 | cursor: pointer; 347 | } 348 | 349 | .backgroundColourGrey { 350 | background-color: #eee; 351 | } 352 | 353 | .triangleList tr { 354 | text-align: center; 355 | } 356 | 357 | #matchNoMatchLabel { 358 | text-align: center; 359 | } 360 | 361 | p { 362 | font-size: 18px; 363 | } 364 | 365 | .figureText { 366 | margin-top: 20px; 367 | text-align: center; 368 | padding: 0px 40px; 369 | width: 100%; 370 | } 371 | 372 | .articleContent { 373 | margin: 40px 0px; 374 | } 375 | 376 | .affineDemoVideoWrapper { 377 | } 378 | 379 | .affineDemoVideo { 380 | width: 700px; 381 | height: 300px; 382 | } 383 | 384 | .mortyVideoWrapper { 385 | 386 | } 387 | 388 | .mortyVideo { 389 | width: 700px; 390 | height: 370px; 391 | } 392 | 393 | .compositeMatchingImageWrapper { 394 | margin-top: 80px; 395 | } 396 | 397 | .stepsWrapper { 398 | padding: 0px 40px 30px 40px; 399 | font-size: 18px; 400 | } 401 | 402 | .stepTextWrapper { 403 | float: left; 404 | display: block; 405 | width: 100%; 406 | padding: 10px 0px; 407 | } 408 | 409 | .boldListNumber { 410 | float: left; 411 | font-weight: bold; 412 | text-align: right; 413 | width: 20px; 414 | margin: 0px 10px; 415 | } 416 | 417 | .stepText { 418 | float: left; 419 | width: 500px; 420 | } 421 | 422 | #queryImageCanvasUiOverlay { 423 | touch-action: none; 424 | } 425 | 426 | footer { 427 | border-top: solid 1px #eee; 428 | background-color: #f7f7f7; 429 | width: 100%; 430 | min-width: 1200px; 431 | height: 150px; 432 | } 433 | 434 | .footerContents { 435 | margin: 
auto; 436 | width: 600px; 437 | padding: 40px 0px; 438 | text-align: center; 439 | } 440 | 441 | .styleCopied { 442 | width: 200px; 443 | display: block; 444 | float: left; 445 | } 446 | 447 | .styleCopiedLink { 448 | font-size: 13px; 449 | color: #999; 450 | } 451 | 452 | .contactMe { 453 | font-size: 15px; 454 | color: #999; 455 | margin-top: 40px; 456 | } 457 | 458 | .github-link { 459 | width: 60px; 460 | height: 60px; 461 | position: absolute; 462 | display: block; 463 | top: 0; 464 | right: 0; 465 | z-index: 1000; 466 | } 467 | 468 | .github-link .bg { 469 | fill: #fff; 470 | fill-opacity: 0.2; 471 | } 472 | 473 | .github-link:hover .bg { 474 | fill-opacity: 0.3; 475 | } 476 | 477 | .github-link .icon { 478 | fill: #fff; 479 | fill-opacity: 0.6; 480 | } 481 | -------------------------------------------------------------------------------- /fullEndToEndDemo/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(imageHashTesting) 3 | 4 | set(CMAKE_CXX_STANDARD 11) 5 | 6 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") 7 | 8 | find_package(Boost REQUIRED) 9 | 10 | #find_package(hiredis REQUIRED) 11 | 12 | include_directories(${Boost_INCLUDE_DIRS}) 13 | include_directories(include) 14 | 15 | find_package(OpenMP REQUIRED) 16 | 17 | set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 18 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 19 | 20 | include_directories(src) 21 | 22 | include_directories(include) 23 | 24 | SET(TARGET_H 25 | src/PerceptualHash.h 26 | src/FragmentHash.h 27 | src/Keypoint.h 28 | src/ShapeAndPositionInvariantImage.h 29 | src/Triangle.h 30 | src/mainImageProcessingFunctions.hpp 31 | ) 32 | 33 | set(IMAGE_HASH_FILES 34 | src/img_hash_opencv_module/phash.cpp 35 | src/img_hash_opencv_module/PHash_Fast.cpp 36 | ) 37 | 38 | 39 | set(SOURCE_FILE 40 | src/main.cc 41 | ${IMAGE_HASH_FILES} 42 | ) 43 | 44 
| find_package( OpenCV REQUIRED ) 45 | include_directories(${OpenCV_INCLUDE_DIRS}) 46 | 47 | #set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin ) 48 | set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib ) 49 | 50 | add_executable(runDemo ${SOURCE_FILE} ${TARGET_H}) 51 | 52 | link_directories(lib) 53 | target_link_libraries(runDemo ${OpenCV_LIBS}) 54 | target_link_libraries(runDemo ${CMAKE_CURRENT_SOURCE_DIR}/lib/libhiredis.a) 55 | 56 | # Options. Turn on with 'cmake -Dmyvarname=ON'. 57 | 58 | set(CMAKE_CXX_FLAGS "-std=c++11 -fopenmp -g -Wall") 59 | 60 | 61 | 62 | get_property(dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) 63 | foreach(dir ${dirs}) 64 | message(STATUS "dir='${dir}'") 65 | endforeach() 66 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/async.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2009-2011, Salvatore Sanfilippo 3 | * Copyright (c) 2010-2011, Pieter Noordhuis 4 | * 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions are met: 9 | * 10 | * * Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 12 | * * Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * * Neither the name of Redis nor the names of its contributors may be used 16 | * to endorse or promote products derived from this software without 17 | * specific prior written permission. 
18 | * 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 | * POSSIBILITY OF SUCH DAMAGE. 30 | */ 31 | 32 | #ifndef __HIREDIS_ASYNC_H 33 | #define __HIREDIS_ASYNC_H 34 | #include "hiredis.h" 35 | 36 | #ifdef __cplusplus 37 | extern "C" { 38 | #endif 39 | 40 | struct redisAsyncContext; /* need forward declaration of redisAsyncContext */ 41 | struct dict; /* dictionary header is included in async.c */ 42 | 43 | /* Reply callback prototype and container */ 44 | typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*); 45 | typedef struct redisCallback { 46 | struct redisCallback *next; /* simple singly linked list */ 47 | redisCallbackFn *fn; 48 | void *privdata; 49 | } redisCallback; 50 | 51 | /* List of callbacks for either regular replies or pub/sub */ 52 | typedef struct redisCallbackList { 53 | redisCallback *head, *tail; 54 | } redisCallbackList; 55 | 56 | /* Connection callback prototypes */ 57 | typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status); 58 | typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status); 59 | 60 | /* Context for an async connection to Redis */ 61 | typedef struct redisAsyncContext { 62 | /* Hold the regular context, so it can be 
realloc'ed. */ 63 | redisContext c; 64 | 65 | /* Setup error flags so they can be used directly. */ 66 | int err; 67 | char *errstr; 68 | 69 | /* Not used by hiredis */ 70 | void *data; 71 | 72 | /* Event library data and hooks */ 73 | struct { 74 | void *data; 75 | 76 | /* Hooks that are called when the library expects to start 77 | * reading/writing. These functions should be idempotent. */ 78 | void (*addRead)(void *privdata); 79 | void (*delRead)(void *privdata); 80 | void (*addWrite)(void *privdata); 81 | void (*delWrite)(void *privdata); 82 | void (*cleanup)(void *privdata); 83 | } ev; 84 | 85 | /* Called when either the connection is terminated due to an error or per 86 | * user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */ 87 | redisDisconnectCallback *onDisconnect; 88 | 89 | /* Called when the first write event was received. */ 90 | redisConnectCallback *onConnect; 91 | 92 | /* Regular command callbacks */ 93 | redisCallbackList replies; 94 | 95 | /* Subscription callbacks */ 96 | struct { 97 | redisCallbackList invalid; 98 | struct dict *channels; 99 | struct dict *patterns; 100 | } sub; 101 | } redisAsyncContext; 102 | 103 | /* Functions that proxy to hiredis */ 104 | redisAsyncContext *redisAsyncConnect(const char *ip, int port); 105 | redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr); 106 | redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, 107 | const char *source_addr); 108 | redisAsyncContext *redisAsyncConnectUnix(const char *path); 109 | int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn); 110 | int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); 111 | void redisAsyncDisconnect(redisAsyncContext *ac); 112 | void redisAsyncFree(redisAsyncContext *ac); 113 | 114 | /* Handle read/write events */ 115 | void redisAsyncHandleRead(redisAsyncContext *ac); 116 | void redisAsyncHandleWrite(redisAsyncContext 
*ac); 117 | 118 | /* Command functions for an async context. Write the command to the 119 | * output buffer and register the provided callback. */ 120 | int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap); 121 | int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...); 122 | int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen); 123 | int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len); 124 | 125 | #ifdef __cplusplus 126 | } 127 | #endif 128 | 129 | #endif 130 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/dict.h: -------------------------------------------------------------------------------- 1 | /* Hash table implementation. 2 | * 3 | * This file implements in memory hash tables with insert/del/replace/find/ 4 | * get-random-element operations. Hash tables will auto resize if needed 5 | * tables of power of two in size are used, collisions are handled by 6 | * chaining. See the source code for more information... :) 7 | * 8 | * Copyright (c) 2006-2010, Salvatore Sanfilippo 9 | * All rights reserved. 10 | * 11 | * Redistribution and use in source and binary forms, with or without 12 | * modification, are permitted provided that the following conditions are met: 13 | * 14 | * * Redistributions of source code must retain the above copyright notice, 15 | * this list of conditions and the following disclaimer. 16 | * * Redistributions in binary form must reproduce the above copyright 17 | * notice, this list of conditions and the following disclaimer in the 18 | * documentation and/or other materials provided with the distribution. 
19 | * * Neither the name of Redis nor the names of its contributors may be used 20 | * to endorse or promote products derived from this software without 21 | * specific prior written permission. 22 | * 23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 27 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 | * POSSIBILITY OF SUCH DAMAGE. 34 | */ 35 | 36 | #ifndef __DICT_H 37 | #define __DICT_H 38 | 39 | #define DICT_OK 0 40 | #define DICT_ERR 1 41 | 42 | /* Unused arguments generate annoying warnings... 
*/ 43 | #define DICT_NOTUSED(V) ((void) V) 44 | 45 | typedef struct dictEntry { 46 | void *key; 47 | void *val; 48 | struct dictEntry *next; 49 | } dictEntry; 50 | 51 | typedef struct dictType { 52 | unsigned int (*hashFunction)(const void *key); 53 | void *(*keyDup)(void *privdata, const void *key); 54 | void *(*valDup)(void *privdata, const void *obj); 55 | int (*keyCompare)(void *privdata, const void *key1, const void *key2); 56 | void (*keyDestructor)(void *privdata, void *key); 57 | void (*valDestructor)(void *privdata, void *obj); 58 | } dictType; 59 | 60 | typedef struct dict { 61 | dictEntry **table; 62 | dictType *type; 63 | unsigned long size; 64 | unsigned long sizemask; 65 | unsigned long used; 66 | void *privdata; 67 | } dict; 68 | 69 | typedef struct dictIterator { 70 | dict *ht; 71 | int index; 72 | dictEntry *entry, *nextEntry; 73 | } dictIterator; 74 | 75 | /* This is the initial size of every hash table */ 76 | #define DICT_HT_INITIAL_SIZE 4 77 | 78 | /* ------------------------------- Macros ------------------------------------*/ 79 | #define dictFreeEntryVal(ht, entry) \ 80 | if ((ht)->type->valDestructor) \ 81 | (ht)->type->valDestructor((ht)->privdata, (entry)->val) 82 | 83 | #define dictSetHashVal(ht, entry, _val_) do { \ 84 | if ((ht)->type->valDup) \ 85 | entry->val = (ht)->type->valDup((ht)->privdata, _val_); \ 86 | else \ 87 | entry->val = (_val_); \ 88 | } while(0) 89 | 90 | #define dictFreeEntryKey(ht, entry) \ 91 | if ((ht)->type->keyDestructor) \ 92 | (ht)->type->keyDestructor((ht)->privdata, (entry)->key) 93 | 94 | #define dictSetHashKey(ht, entry, _key_) do { \ 95 | if ((ht)->type->keyDup) \ 96 | entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \ 97 | else \ 98 | entry->key = (_key_); \ 99 | } while(0) 100 | 101 | #define dictCompareHashKeys(ht, key1, key2) \ 102 | (((ht)->type->keyCompare) ? 
\ 103 | (ht)->type->keyCompare((ht)->privdata, key1, key2) : \ 104 | (key1) == (key2)) 105 | 106 | #define dictHashKey(ht, key) (ht)->type->hashFunction(key) 107 | 108 | #define dictGetEntryKey(he) ((he)->key) 109 | #define dictGetEntryVal(he) ((he)->val) 110 | #define dictSlots(ht) ((ht)->size) 111 | #define dictSize(ht) ((ht)->used) 112 | 113 | /* API */ 114 | static unsigned int dictGenHashFunction(const unsigned char *buf, int len); 115 | static dict *dictCreate(dictType *type, void *privDataPtr); 116 | static int dictExpand(dict *ht, unsigned long size); 117 | static int dictAdd(dict *ht, void *key, void *val); 118 | static int dictReplace(dict *ht, void *key, void *val); 119 | static int dictDelete(dict *ht, const void *key); 120 | static void dictRelease(dict *ht); 121 | static dictEntry * dictFind(dict *ht, const void *key); 122 | static dictIterator *dictGetIterator(dict *ht); 123 | static dictEntry *dictNext(dictIterator *iter); 124 | static void dictReleaseIterator(dictIterator *iter); 125 | 126 | #endif /* __DICT_H */ 127 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/fmacros.h: -------------------------------------------------------------------------------- 1 | #ifndef __HIREDIS_FMACRO_H 2 | #define __HIREDIS_FMACRO_H 3 | 4 | #if defined(__linux__) 5 | #define _BSD_SOURCE 6 | #define _DEFAULT_SOURCE 7 | #endif 8 | 9 | #if defined(__CYGWIN__) 10 | #include 11 | #endif 12 | 13 | #if defined(__sun__) 14 | #define _POSIX_C_SOURCE 200112L 15 | #else 16 | #if !(defined(__APPLE__) && defined(__MACH__)) 17 | #define _XOPEN_SOURCE 600 18 | #endif 19 | #endif 20 | 21 | #if defined(__APPLE__) && defined(__MACH__) 22 | #define _OSX 23 | #endif 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/hiredis.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 
2009-2011, Salvatore Sanfilippo 3 | * Copyright (c) 2010-2014, Pieter Noordhuis 4 | * Copyright (c) 2015, Matt Stancliff , 5 | * Jan-Erik Rediger 6 | * 7 | * All rights reserved. 8 | * 9 | * Redistribution and use in source and binary forms, with or without 10 | * modification, are permitted provided that the following conditions are met: 11 | * 12 | * * Redistributions of source code must retain the above copyright notice, 13 | * this list of conditions and the following disclaimer. 14 | * * Redistributions in binary form must reproduce the above copyright 15 | * notice, this list of conditions and the following disclaimer in the 16 | * documentation and/or other materials provided with the distribution. 17 | * * Neither the name of Redis nor the names of its contributors may be used 18 | * to endorse or promote products derived from this software without 19 | * specific prior written permission. 20 | * 21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 | * POSSIBILITY OF SUCH DAMAGE. 
32 | */ 33 | 34 | #ifndef __HIREDIS_H 35 | #define __HIREDIS_H 36 | #include "read.h" 37 | #include /* for va_list */ 38 | #include /* for struct timeval */ 39 | #include /* uintXX_t, etc */ 40 | #include "sds.h" /* for sds */ 41 | 42 | #define HIREDIS_MAJOR 0 43 | #define HIREDIS_MINOR 13 44 | #define HIREDIS_PATCH 3 45 | #define HIREDIS_SONAME 0.13 46 | 47 | /* Connection type can be blocking or non-blocking and is set in the 48 | * least significant bit of the flags field in redisContext. */ 49 | #define REDIS_BLOCK 0x1 50 | 51 | /* Connection may be disconnected before being free'd. The second bit 52 | * in the flags field is set when the context is connected. */ 53 | #define REDIS_CONNECTED 0x2 54 | 55 | /* The async API might try to disconnect cleanly and flush the output 56 | * buffer and read all subsequent replies before disconnecting. 57 | * This flag means no new commands can come in and the connection 58 | * should be terminated once all replies have been read. */ 59 | #define REDIS_DISCONNECTING 0x4 60 | 61 | /* Flag specific to the async API which means that the context should be clean 62 | * up as soon as possible. */ 63 | #define REDIS_FREEING 0x8 64 | 65 | /* Flag that is set when an async callback is executed. */ 66 | #define REDIS_IN_CALLBACK 0x10 67 | 68 | /* Flag that is set when the async context has one or more subscriptions. */ 69 | #define REDIS_SUBSCRIBED 0x20 70 | 71 | /* Flag that is set when monitor mode is active */ 72 | #define REDIS_MONITORING 0x40 73 | 74 | /* Flag that is set when we should set SO_REUSEADDR before calling bind() */ 75 | #define REDIS_REUSEADDR 0x80 76 | 77 | #define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */ 78 | 79 | /* number of times we retry to connect in the case of EADDRNOTAVAIL and 80 | * SO_REUSEADDR is being used. 
*/ 81 | #define REDIS_CONNECT_RETRIES 10 82 | 83 | /* strerror_r has two completely different prototypes and behaviors 84 | * depending on system issues, so we need to operate on the error buffer 85 | * differently depending on which strerror_r we're using. */ 86 | #ifndef _GNU_SOURCE 87 | /* "regular" POSIX strerror_r that does the right thing. */ 88 | #define __redis_strerror_r(errno, buf, len) \ 89 | do { \ 90 | strerror_r((errno), (buf), (len)); \ 91 | } while (0) 92 | #else 93 | /* "bad" GNU strerror_r we need to clean up after. */ 94 | #define __redis_strerror_r(errno, buf, len) \ 95 | do { \ 96 | char *err_str = strerror_r((errno), (buf), (len)); \ 97 | /* If return value _isn't_ the start of the buffer we passed in, \ 98 | * then GNU strerror_r returned an internal static buffer and we \ 99 | * need to copy the result into our private buffer. */ \ 100 | if (err_str != (buf)) { \ 101 | strncpy((buf), err_str, ((len) - 1)); \ 102 | buf[(len)-1] = '\0'; \ 103 | } \ 104 | } while (0) 105 | #endif 106 | 107 | #ifdef __cplusplus 108 | extern "C" { 109 | #endif 110 | 111 | /* This is the reply object returned by redisCommand() */ 112 | typedef struct redisReply { 113 | int type; /* REDIS_REPLY_* */ 114 | long long integer; /* The integer when type is REDIS_REPLY_INTEGER */ 115 | size_t len; /* Length of string */ 116 | char *str; /* Used for both REDIS_REPLY_ERROR and REDIS_REPLY_STRING */ 117 | size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */ 118 | struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */ 119 | } redisReply; 120 | 121 | redisReader *redisReaderCreate(void); 122 | 123 | /* Function to free the reply objects hiredis returns by default. */ 124 | void freeReplyObject(void *reply); 125 | 126 | /* Functions to format a command according to the protocol. 
*/ 127 | int redisvFormatCommand(char **target, const char *format, va_list ap); 128 | int redisFormatCommand(char **target, const char *format, ...); 129 | int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen); 130 | int redisFormatSdsCommandArgv(sds *target, int argc, const char ** argv, const size_t *argvlen); 131 | void redisFreeCommand(char *cmd); 132 | void redisFreeSdsCommand(sds cmd); 133 | 134 | enum redisConnectionType { 135 | REDIS_CONN_TCP, 136 | REDIS_CONN_UNIX 137 | }; 138 | 139 | /* Context for a connection to Redis */ 140 | typedef struct redisContext { 141 | int err; /* Error flags, 0 when there is no error */ 142 | char errstr[128]; /* String representation of error when applicable */ 143 | int fd; 144 | int flags; 145 | char *obuf; /* Write buffer */ 146 | redisReader *reader; /* Protocol reader */ 147 | 148 | enum redisConnectionType connection_type; 149 | struct timeval *timeout; 150 | 151 | struct { 152 | char *host; 153 | char *source_addr; 154 | int port; 155 | } tcp; 156 | 157 | struct { 158 | char *path; 159 | } unix_sock; 160 | 161 | } redisContext; 162 | 163 | redisContext *redisConnect(const char *ip, int port); 164 | redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv); 165 | redisContext *redisConnectNonBlock(const char *ip, int port); 166 | redisContext *redisConnectBindNonBlock(const char *ip, int port, 167 | const char *source_addr); 168 | redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, 169 | const char *source_addr); 170 | redisContext *redisConnectUnix(const char *path); 171 | redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv); 172 | redisContext *redisConnectUnixNonBlock(const char *path); 173 | redisContext *redisConnectFd(int fd); 174 | 175 | /** 176 | * Reconnect the given context using the saved information. 
177 | * 178 | * This re-uses the exact same connect options as in the initial connection. 179 | * host, ip (or path), timeout and bind address are reused, 180 | * flags are used unmodified from the existing context. 181 | * 182 | * Returns REDIS_OK on successful connect or REDIS_ERR otherwise. 183 | */ 184 | int redisReconnect(redisContext *c); 185 | 186 | int redisSetTimeout(redisContext *c, const struct timeval tv); 187 | int redisEnableKeepAlive(redisContext *c); 188 | void redisFree(redisContext *c); 189 | int redisFreeKeepFd(redisContext *c); 190 | int redisBufferRead(redisContext *c); 191 | int redisBufferWrite(redisContext *c, int *done); 192 | 193 | /* In a blocking context, this function first checks if there are unconsumed 194 | * replies to return and returns one if so. Otherwise, it flushes the output 195 | * buffer to the socket and reads until it has a reply. In a non-blocking 196 | * context, it will return unconsumed replies until there are no more. */ 197 | int redisGetReply(redisContext *c, void **reply); 198 | int redisGetReplyFromReader(redisContext *c, void **reply); 199 | 200 | /* Write a formatted command to the output buffer. Use these functions in blocking mode 201 | * to get a pipeline of commands. */ 202 | int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len); 203 | 204 | /* Write a command to the output buffer. Use these functions in blocking mode 205 | * to get a pipeline of commands. */ 206 | int redisvAppendCommand(redisContext *c, const char *format, va_list ap); 207 | int redisAppendCommand(redisContext *c, const char *format, ...); 208 | int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); 209 | 210 | /* Issue a command to Redis. In a blocking context, it is identical to calling 211 | * redisAppendCommand, followed by redisGetReply. The function will return 212 | * NULL if there was an error in performing the request, otherwise it will 213 | * return the reply. 
In a non-blocking context, it is identical to calling 214 | * only redisAppendCommand and will always return NULL. */ 215 | void *redisvCommand(redisContext *c, const char *format, va_list ap); 216 | void *redisCommand(redisContext *c, const char *format, ...); 217 | void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); 218 | 219 | #ifdef __cplusplus 220 | } 221 | #endif 222 | 223 | #endif 224 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/net.h: -------------------------------------------------------------------------------- 1 | /* Extracted from anet.c to work properly with Hiredis error reporting. 2 | * 3 | * Copyright (c) 2009-2011, Salvatore Sanfilippo 4 | * Copyright (c) 2010-2014, Pieter Noordhuis 5 | * Copyright (c) 2015, Matt Stancliff , 6 | * Jan-Erik Rediger 7 | * 8 | * All rights reserved. 9 | * 10 | * Redistribution and use in source and binary forms, with or without 11 | * modification, are permitted provided that the following conditions are met: 12 | * 13 | * * Redistributions of source code must retain the above copyright notice, 14 | * this list of conditions and the following disclaimer. 15 | * * Redistributions in binary form must reproduce the above copyright 16 | * notice, this list of conditions and the following disclaimer in the 17 | * documentation and/or other materials provided with the distribution. 18 | * * Neither the name of Redis nor the names of its contributors may be used 19 | * to endorse or promote products derived from this software without 20 | * specific prior written permission. 21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | * POSSIBILITY OF SUCH DAMAGE. 33 | */ 34 | 35 | #ifndef __NET_H 36 | #define __NET_H 37 | 38 | #include "hiredis.h" 39 | 40 | #if defined(__sun) 41 | #define AF_LOCAL AF_UNIX 42 | #endif 43 | 44 | int redisCheckSocketError(redisContext *c); 45 | int redisContextSetTimeout(redisContext *c, const struct timeval tv); 46 | int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout); 47 | int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, 48 | const struct timeval *timeout, 49 | const char *source_addr); 50 | int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout); 51 | int redisKeepAlive(redisContext *c, int interval); 52 | 53 | #endif 54 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/read.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2009-2011, Salvatore Sanfilippo 3 | * Copyright (c) 2010-2011, Pieter Noordhuis 4 | * 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions are met: 9 | * 10 | * * Redistributions of source code must retain the above copyright notice, 11 | * this list of conditions and the following disclaimer. 
12 | * * Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * * Neither the name of Redis nor the names of its contributors may be used 16 | * to endorse or promote products derived from this software without 17 | * specific prior written permission. 18 | * 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 | * POSSIBILITY OF SUCH DAMAGE. 30 | */ 31 | 32 | 33 | #ifndef __HIREDIS_READ_H 34 | #define __HIREDIS_READ_H 35 | #include /* for size_t */ 36 | 37 | #define REDIS_ERR -1 38 | #define REDIS_OK 0 39 | 40 | /* When an error occurs, the err flag in a context is set to hold the type of 41 | * error that occurred. REDIS_ERR_IO means there was an I/O error and you 42 | * should use the "errno" variable to find out what is wrong. 43 | * For other values, the "errstr" field will hold a description. */ 44 | #define REDIS_ERR_IO 1 /* Error in read or write */ 45 | #define REDIS_ERR_EOF 3 /* End of file */ 46 | #define REDIS_ERR_PROTOCOL 4 /* Protocol error */ 47 | #define REDIS_ERR_OOM 5 /* Out of memory */ 48 | #define REDIS_ERR_OTHER 2 /* Everything else... 
*/ 49 | 50 | #define REDIS_REPLY_STRING 1 51 | #define REDIS_REPLY_ARRAY 2 52 | #define REDIS_REPLY_INTEGER 3 53 | #define REDIS_REPLY_NIL 4 54 | #define REDIS_REPLY_STATUS 5 55 | #define REDIS_REPLY_ERROR 6 56 | 57 | #define REDIS_READER_MAX_BUF (1024*16) /* Default max unused reader buffer. */ 58 | 59 | #ifdef __cplusplus 60 | extern "C" { 61 | #endif 62 | 63 | typedef struct redisReadTask { 64 | int type; 65 | int elements; /* number of elements in multibulk container */ 66 | int idx; /* index in parent (array) object */ 67 | void *obj; /* holds user-generated value for a read task */ 68 | struct redisReadTask *parent; /* parent task */ 69 | void *privdata; /* user-settable arbitrary field */ 70 | } redisReadTask; 71 | 72 | typedef struct redisReplyObjectFunctions { 73 | void *(*createString)(const redisReadTask*, char*, size_t); 74 | void *(*createArray)(const redisReadTask*, int); 75 | void *(*createInteger)(const redisReadTask*, long long); 76 | void *(*createNil)(const redisReadTask*); 77 | void (*freeObject)(void*); 78 | } redisReplyObjectFunctions; 79 | 80 | typedef struct redisReader { 81 | int err; /* Error flags, 0 when there is no error */ 82 | char errstr[128]; /* String representation of error when applicable */ 83 | 84 | char *buf; /* Read buffer */ 85 | size_t pos; /* Buffer cursor */ 86 | size_t len; /* Buffer length */ 87 | size_t maxbuf; /* Max length of unused buffer */ 88 | 89 | redisReadTask rstack[9]; 90 | int ridx; /* Index of current read task */ 91 | void *reply; /* Temporary reply pointer */ 92 | 93 | redisReplyObjectFunctions *fn; 94 | void *privdata; 95 | } redisReader; 96 | 97 | /* Public API for the protocol parser. 
*/ 98 | redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn); 99 | void redisReaderFree(redisReader *r); 100 | int redisReaderFeed(redisReader *r, const char *buf, size_t len); 101 | int redisReaderGetReply(redisReader *r, void **reply); 102 | 103 | #define redisReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p)) 104 | #define redisReaderGetObject(_r) (((redisReader*)(_r))->reply) 105 | #define redisReaderGetError(_r) (((redisReader*)(_r))->errstr) 106 | 107 | #ifdef __cplusplus 108 | } 109 | #endif 110 | 111 | #endif 112 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/sds.h: -------------------------------------------------------------------------------- 1 | /* SDSLib 2.0 -- A C dynamic strings library 2 | * 3 | * Copyright (c) 2006-2015, Salvatore Sanfilippo 4 | * Copyright (c) 2015, Oran Agra 5 | * Copyright (c) 2015, Redis Labs, Inc 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * * Redistributions of source code must retain the above copyright notice, 12 | * this list of conditions and the following disclaimer. 13 | * * Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * * Neither the name of Redis nor the names of its contributors may be used 17 | * to endorse or promote products derived from this software without 18 | * specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #ifndef __SDS_H 34 | #define __SDS_H 35 | 36 | #define SDS_MAX_PREALLOC (1024*1024) 37 | 38 | #include 39 | #include 40 | #include 41 | 42 | typedef char *sds; 43 | 44 | /* Note: sdshdr5 is never used, we just access the flags byte directly. 45 | * However is here to document the layout of type 5 SDS strings. */ 46 | struct __attribute__ ((__packed__)) sdshdr5 { 47 | unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ 48 | char buf[]; 49 | }; 50 | struct __attribute__ ((__packed__)) sdshdr8 { 51 | uint8_t len; /* used */ 52 | uint8_t alloc; /* excluding the header and null terminator */ 53 | unsigned char flags; /* 3 lsb of type, 5 unused bits */ 54 | char buf[]; 55 | }; 56 | struct __attribute__ ((__packed__)) sdshdr16 { 57 | uint16_t len; /* used */ 58 | uint16_t alloc; /* excluding the header and null terminator */ 59 | unsigned char flags; /* 3 lsb of type, 5 unused bits */ 60 | char buf[]; 61 | }; 62 | struct __attribute__ ((__packed__)) sdshdr32 { 63 | uint32_t len; /* used */ 64 | uint32_t alloc; /* excluding the header and null terminator */ 65 | unsigned char flags; /* 3 lsb of type, 5 unused bits */ 66 | char buf[]; 67 | }; 68 | struct __attribute__ ((__packed__)) sdshdr64 { 69 | uint64_t len; /* used */ 70 | uint64_t alloc; /* excluding the header and null terminator */ 71 | unsigned char flags; /* 3 lsb of type, 5 unused bits */ 72 | char buf[]; 73 | }; 74 | 75 | 
#define SDS_TYPE_5 0 76 | #define SDS_TYPE_8 1 77 | #define SDS_TYPE_16 2 78 | #define SDS_TYPE_32 3 79 | #define SDS_TYPE_64 4 80 | #define SDS_TYPE_MASK 7 81 | #define SDS_TYPE_BITS 3 82 | #define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))); 83 | #define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) 84 | #define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) 85 | 86 | static inline size_t sdslen(const sds s) { 87 | unsigned char flags = s[-1]; 88 | switch(flags&SDS_TYPE_MASK) { 89 | case SDS_TYPE_5: 90 | return SDS_TYPE_5_LEN(flags); 91 | case SDS_TYPE_8: 92 | return SDS_HDR(8,s)->len; 93 | case SDS_TYPE_16: 94 | return SDS_HDR(16,s)->len; 95 | case SDS_TYPE_32: 96 | return SDS_HDR(32,s)->len; 97 | case SDS_TYPE_64: 98 | return SDS_HDR(64,s)->len; 99 | } 100 | return 0; 101 | } 102 | 103 | static inline size_t sdsavail(const sds s) { 104 | unsigned char flags = s[-1]; 105 | switch(flags&SDS_TYPE_MASK) { 106 | case SDS_TYPE_5: { 107 | return 0; 108 | } 109 | case SDS_TYPE_8: { 110 | SDS_HDR_VAR(8,s); 111 | return sh->alloc - sh->len; 112 | } 113 | case SDS_TYPE_16: { 114 | SDS_HDR_VAR(16,s); 115 | return sh->alloc - sh->len; 116 | } 117 | case SDS_TYPE_32: { 118 | SDS_HDR_VAR(32,s); 119 | return sh->alloc - sh->len; 120 | } 121 | case SDS_TYPE_64: { 122 | SDS_HDR_VAR(64,s); 123 | return sh->alloc - sh->len; 124 | } 125 | } 126 | return 0; 127 | } 128 | 129 | static inline void sdssetlen(sds s, size_t newlen) { 130 | unsigned char flags = s[-1]; 131 | switch(flags&SDS_TYPE_MASK) { 132 | case SDS_TYPE_5: 133 | { 134 | unsigned char *fp = ((unsigned char*)s)-1; 135 | *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); 136 | } 137 | break; 138 | case SDS_TYPE_8: 139 | SDS_HDR(8,s)->len = newlen; 140 | break; 141 | case SDS_TYPE_16: 142 | SDS_HDR(16,s)->len = newlen; 143 | break; 144 | case SDS_TYPE_32: 145 | SDS_HDR(32,s)->len = newlen; 146 | break; 147 | case SDS_TYPE_64: 148 | SDS_HDR(64,s)->len = newlen; 149 
| break; 150 | } 151 | } 152 | 153 | static inline void sdsinclen(sds s, size_t inc) { 154 | unsigned char flags = s[-1]; 155 | switch(flags&SDS_TYPE_MASK) { 156 | case SDS_TYPE_5: 157 | { 158 | unsigned char *fp = ((unsigned char*)s)-1; 159 | unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc; 160 | *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); 161 | } 162 | break; 163 | case SDS_TYPE_8: 164 | SDS_HDR(8,s)->len += inc; 165 | break; 166 | case SDS_TYPE_16: 167 | SDS_HDR(16,s)->len += inc; 168 | break; 169 | case SDS_TYPE_32: 170 | SDS_HDR(32,s)->len += inc; 171 | break; 172 | case SDS_TYPE_64: 173 | SDS_HDR(64,s)->len += inc; 174 | break; 175 | } 176 | } 177 | 178 | /* sdsalloc() = sdsavail() + sdslen() */ 179 | static inline size_t sdsalloc(const sds s) { 180 | unsigned char flags = s[-1]; 181 | switch(flags&SDS_TYPE_MASK) { 182 | case SDS_TYPE_5: 183 | return SDS_TYPE_5_LEN(flags); 184 | case SDS_TYPE_8: 185 | return SDS_HDR(8,s)->alloc; 186 | case SDS_TYPE_16: 187 | return SDS_HDR(16,s)->alloc; 188 | case SDS_TYPE_32: 189 | return SDS_HDR(32,s)->alloc; 190 | case SDS_TYPE_64: 191 | return SDS_HDR(64,s)->alloc; 192 | } 193 | return 0; 194 | } 195 | 196 | static inline void sdssetalloc(sds s, size_t newlen) { 197 | unsigned char flags = s[-1]; 198 | switch(flags&SDS_TYPE_MASK) { 199 | case SDS_TYPE_5: 200 | /* Nothing to do, this type has no total allocation info. 
*/ 201 | break; 202 | case SDS_TYPE_8: 203 | SDS_HDR(8,s)->alloc = newlen; 204 | break; 205 | case SDS_TYPE_16: 206 | SDS_HDR(16,s)->alloc = newlen; 207 | break; 208 | case SDS_TYPE_32: 209 | SDS_HDR(32,s)->alloc = newlen; 210 | break; 211 | case SDS_TYPE_64: 212 | SDS_HDR(64,s)->alloc = newlen; 213 | break; 214 | } 215 | } 216 | 217 | sds sdsnewlen(const void *init, size_t initlen); 218 | sds sdsnew(const char *init); 219 | sds sdsempty(void); 220 | sds sdsdup(const sds s); 221 | void sdsfree(sds s); 222 | sds sdsgrowzero(sds s, size_t len); 223 | sds sdscatlen(sds s, const void *t, size_t len); 224 | sds sdscat(sds s, const char *t); 225 | sds sdscatsds(sds s, const sds t); 226 | sds sdscpylen(sds s, const char *t, size_t len); 227 | sds sdscpy(sds s, const char *t); 228 | 229 | sds sdscatvprintf(sds s, const char *fmt, va_list ap); 230 | #ifdef __GNUC__ 231 | sds sdscatprintf(sds s, const char *fmt, ...) 232 | __attribute__((format(printf, 2, 3))); 233 | #else 234 | sds sdscatprintf(sds s, const char *fmt, ...); 235 | #endif 236 | 237 | sds sdscatfmt(sds s, char const *fmt, ...); 238 | sds sdstrim(sds s, const char *cset); 239 | void sdsrange(sds s, int start, int end); 240 | void sdsupdatelen(sds s); 241 | void sdsclear(sds s); 242 | int sdscmp(const sds s1, const sds s2); 243 | sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); 244 | void sdsfreesplitres(sds *tokens, int count); 245 | void sdstolower(sds s); 246 | void sdstoupper(sds s); 247 | sds sdsfromlonglong(long long value); 248 | sds sdscatrepr(sds s, const char *p, size_t len); 249 | sds *sdssplitargs(const char *line, int *argc); 250 | sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); 251 | sds sdsjoin(char **argv, int argc, char *sep); 252 | sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); 253 | 254 | /* Low level functions exposed to the user API */ 255 | sds sdsMakeRoomFor(sds s, size_t addlen); 256 | void sdsIncrLen(sds s, 
int incr); 257 | sds sdsRemoveFreeSpace(sds s); 258 | size_t sdsAllocSize(sds s); 259 | void *sdsAllocPtr(sds s); 260 | 261 | /* Export the allocator used by SDS to the program using SDS. 262 | * Sometimes the program SDS is linked to, may use a different set of 263 | * allocators, but may want to allocate or free things that SDS will 264 | * respectively free or allocate. */ 265 | void *sds_malloc(size_t size); 266 | void *sds_realloc(void *ptr, size_t size); 267 | void sds_free(void *ptr); 268 | 269 | #ifdef REDIS_TEST 270 | int sdsTest(int argc, char *argv[]); 271 | #endif 272 | 273 | #endif 274 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/sdsalloc.h: -------------------------------------------------------------------------------- 1 | /* SDSLib 2.0 -- A C dynamic strings library 2 | * 3 | * Copyright (c) 2006-2015, Salvatore Sanfilippo 4 | * Copyright (c) 2015, Oran Agra 5 | * Copyright (c) 2015, Redis Labs, Inc 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * * Redistributions of source code must retain the above copyright notice, 12 | * this list of conditions and the following disclaimer. 13 | * * Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * * Neither the name of Redis nor the names of its contributors may be used 17 | * to endorse or promote products derived from this software without 18 | * specific prior written permission. 
19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | /* SDS allocator selection. 34 | * 35 | * This file is used in order to change the SDS allocator at compile time. 36 | * Just define the following defines to what you want to use. Also add 37 | * the include of your alternate allocator if needed (not needed in order 38 | * to use the default libc allocator). 
*/ 39 | 40 | #define s_malloc malloc 41 | #define s_realloc realloc 42 | #define s_free free 43 | -------------------------------------------------------------------------------- /fullEndToEndDemo/include/hiredis/win32.h: -------------------------------------------------------------------------------- 1 | #ifndef _WIN32_HELPER_INCLUDE 2 | #define _WIN32_HELPER_INCLUDE 3 | #ifdef _MSC_VER 4 | 5 | #ifndef inline 6 | #define inline __inline 7 | #endif 8 | 9 | #ifndef va_copy 10 | #define va_copy(d,s) ((d) = (s)) 11 | #endif 12 | 13 | #ifndef snprintf 14 | #define snprintf c99_snprintf 15 | 16 | __inline int c99_vsnprintf(char* str, size_t size, const char* format, va_list ap) 17 | { 18 | int count = -1; 19 | 20 | if (size != 0) 21 | count = _vsnprintf_s(str, size, _TRUNCATE, format, ap); 22 | if (count == -1) 23 | count = _vscprintf(format, ap); 24 | 25 | return count; 26 | } 27 | 28 | __inline int c99_snprintf(char* str, size_t size, const char* format, ...) 29 | { 30 | int count; 31 | va_list ap; 32 | 33 | va_start(ap, format); 34 | count = c99_vsnprintf(str, size, format, ap); 35 | va_end(ap); 36 | 37 | return count; 38 | } 39 | #endif 40 | 41 | #endif 42 | #endif -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/8cats.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/8cats.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat1.png -------------------------------------------------------------------------------- 
/fullEndToEndDemo/inputImages/cat2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat2.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat3.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat4.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat5.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat6.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat7.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat8.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/cat_original.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/cat_original.png -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/mona.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/mona.jpg -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/monaComposite.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/monaComposite.jpg -------------------------------------------------------------------------------- /fullEndToEndDemo/inputImages/van_gogh.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/inputImages/van_gogh.jpg 
-------------------------------------------------------------------------------- /fullEndToEndDemo/lib/libhiredis.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/lib/libhiredis.a -------------------------------------------------------------------------------- /fullEndToEndDemo/runDemo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/fullEndToEndDemo/runDemo -------------------------------------------------------------------------------- /fullEndToEndDemo/runDemo1.sh: -------------------------------------------------------------------------------- 1 | #add all cat images to db 2 | ./runDemo insert inputImages/cat* 3 | 4 | #search for matches using orginal image 5 | ./runDemo lookup inputImages/cat_original.png 6 | -------------------------------------------------------------------------------- /fullEndToEndDemo/runDemo2.sh: -------------------------------------------------------------------------------- 1 | #add the two orginal images 2 | ./runDemo insert ./inputImages/mona.jpg ./inputImages/van_gogh.jpg 3 | 4 | #query the database using the composite of the two images 5 | ./runDemo lookup ./inputImages/monaComposite.jpg 6 | -------------------------------------------------------------------------------- /fullEndToEndDemo/setup.sh: -------------------------------------------------------------------------------- 1 | #Oh boy, this is a big install, grab a cup of tea and enjoy the wait... 
2 | sudo apt-get update 3 | sudo apt-get install git cmake g++ redis-server libboost-all-dev libopencv-dev python-opencv python-numpy python-scipy -y 4 | 5 | #remove annoying opencv error: https://stackoverflow.com/questions/12689304/ctypes-error-libdc1394-error-failed-to-initialize-libdc1394 6 | sudo ln /dev/null /dev/raw1394 7 | 8 | #make it 9 | cmake . 10 | make 11 | 12 | #great now we should have a binary "runDemo" 13 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/FragmentHash.h: -------------------------------------------------------------------------------- 1 | #ifndef fragment_hash_h 2 | #define fragment_hash_h 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "ShapeAndPositionInvariantImage.h" 9 | 10 | using namespace std; 11 | 12 | template class FragmentHash 13 | { 14 | private: 15 | protected: 16 | T hash_; 17 | vector shape_; 18 | public: 19 | 20 | FragmentHash() 21 | {} 22 | 23 | FragmentHash(ShapeAndPositionInvariantImage image) 24 | {} 25 | 26 | FragmentHash(string conver, std::vector shape=vector()): 27 | shape_(shape) 28 | { 29 | //convert string to hash 30 | } 31 | 32 | FragmentHash(const FragmentHash& that): 33 | hash_(that.hash_), 34 | shape_(that.shape_) 35 | {} 36 | 37 | virtual string toString() = 0; 38 | 39 | //getters and setters 40 | 41 | virtual inline T getHash() const { return hash_; } 42 | 43 | virtual vector getShape() const { return shape_; } 44 | 45 | virtual int getHammingDistance(const FragmentHash& inHash) = 0; 46 | 47 | }; 48 | 49 | #endif // fragment_hash_h 50 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/Keypoint.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | class Keypoint 9 | { 10 | public: 11 | double x, y; 12 | Keypoint() {}; 13 | 14 | Keypoint(double _x, double 
_y) 15 | { 16 | x = _x; 17 | y = _y; 18 | } 19 | 20 | string toString(){ 21 | std::ostringstream ss; 22 | ss << "kp[ "<< x << ", " << y << "]"; 23 | return ss.str(); 24 | } 25 | 26 | inline bool operator==(const Keypoint& rhs) const { 27 | return (x == rhs.x && y == rhs.y); 28 | } 29 | }; 30 | 31 | namespace std { 32 | 33 | template <> 34 | struct hash 35 | { 36 | std::size_t operator()(const Keypoint& k) const 37 | { 38 | using std::hash; 39 | return ((hash()(k.x) ^ (hash()(k.y) << 1)) >> 1); 40 | } 41 | }; 42 | 43 | } 44 | 45 | 46 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/PerceptualHash.h: -------------------------------------------------------------------------------- 1 | #ifndef perceptual_hash_h 2 | #define perceptual_hash_h 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "opencv2/opencv.hpp" 10 | #include "Keypoint.h" 11 | #include "FragmentHash.h" 12 | #include "img_hash_opencv_module/phash.hpp" 13 | using namespace std; 14 | 15 | 16 | namespace hashes{ 17 | class PerceptualHash : public FragmentHash> { 18 | private: 19 | vector hash; 20 | vector shape; 21 | 22 | static std::string convertHashToString(vector hash) { 23 | std::string ret = ""; 24 | int h = 0; 25 | for (unsigned int i = 0; i < hash.size(); i++) { 26 | if (hash[i]) { 27 | h += pow(2, (i % 8)); 28 | } 29 | 30 | if (i % 8 == 7) { 31 | std::stringstream buffer; 32 | buffer << std::hex << std::setfill('0') << std::setw(2) << h; 33 | ret += buffer.str(); 34 | h = 0; 35 | } 36 | } 37 | return ret; 38 | } 39 | 40 | static vector hex_str_to_hash(std::string inputString) { 41 | std::vector hash; 42 | int size = inputString.size() / 2; 43 | for (int i = 0; i < size; i++) { 44 | std::string str2 = inputString.substr(i * 2, 2); 45 | if (str2.empty()) { 46 | continue; 47 | } 48 | 49 | unsigned int value = 0; 50 | std::stringstream SS(str2); 51 | SS >> std::hex >> value; 52 | for (int j = 0; j < 8; j++) { 53 | bool check 
= !!((value >> j) & 1); 54 | hash.push_back(check); 55 | } 56 | } 57 | return hash; 58 | } 59 | 60 | static std::vector matHashToBoolArr(cv::Mat const inHash) { 61 | const unsigned char *data = inHash.data; 62 | std::vector v; 63 | for (int i = 0; i < 8; i++) { 64 | unsigned char c = data[i]; 65 | for (int j = 0; j < 8; j++) { 66 | int shift = (8 - j) - 1; 67 | bool val = ((c >> shift) & 1); 68 | v.push_back(val); 69 | } 70 | } 71 | return v; 72 | } 73 | 74 | static vector computeHash(cv::Mat const input) { 75 | cv::Mat inHash; 76 | auto algo = cv::img_hash::PHash(); 77 | algo.compute(input, inHash); 78 | return matHashToBoolArr(inHash); 79 | } 80 | 81 | //returns hamming distance 82 | static int getHashDistance(const FragmentHash> &first, const FragmentHash> &second) { 83 | const vector hash1 = first.getHash(); 84 | const vector hash2 = second.getHash(); 85 | assert(hash1.size() == hash2.size()); 86 | 87 | int dist = 0; 88 | for (unsigned int i = 0; i < hash1.size(); i++) { 89 | dist += (hash1[i] != hash2[i]); 90 | } 91 | return dist; 92 | } 93 | 94 | public: 95 | 96 | PerceptualHash() 97 | {} 98 | 99 | PerceptualHash(ShapeAndPositionInvariantImage frag): 100 | FragmentHash>(frag) 101 | { 102 | hash_ = computeHash(frag.getImageData()); 103 | } 104 | 105 | PerceptualHash(string getHashFromString, std::vector shape=vector()): 106 | FragmentHash>(getHashFromString, shape) 107 | { 108 | hash_ = hex_str_to_hash(getHashFromString); 109 | } 110 | 111 | PerceptualHash(const PerceptualHash& that) : 112 | FragmentHash(that) 113 | {} 114 | 115 | string toString() override 116 | { 117 | return convertHashToString(hash_); 118 | } 119 | 120 | int getHammingDistance(const FragmentHash>& inHash){ 121 | return getHashDistance(*this, inHash); 122 | } 123 | 124 | 125 | }; 126 | 127 | }//end of namespace 128 | #endif // perceptual_hash_h 129 | 130 | 131 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/PerceptualHash_Fast.h: 
-------------------------------------------------------------------------------- 1 | #ifndef perceptual_fast_hash_h 2 | #define perceptual_fast_hash_h 3 | 4 | #include 5 | #include 6 | #include 7 | #include "opencv2/opencv.hpp" 8 | #include "Keypoint.h" 9 | #include "FragmentHash.h" 10 | #include "img_hash_opencv_module/PHash_Fast.h" 11 | using namespace std; 12 | 13 | 14 | namespace hashes{ 15 | class PerceptualHash_Fast : public FragmentHash> { 16 | private: 17 | vector hash; 18 | vector shape; 19 | 20 | static std::string convertHashToString(vector hash) { 21 | std::string ret = ""; 22 | int h = 0; 23 | for (unsigned int i = 0; i < hash.size(); i++) { 24 | if (hash[i]) { 25 | h += pow(2, (i % 8)); 26 | } 27 | 28 | if (i % 8 == 7) { 29 | std::stringstream buffer; 30 | buffer << std::hex << std::setfill('0') << std::setw(2) << h; 31 | ret += buffer.str(); 32 | h = 0; 33 | } 34 | } 35 | return ret; 36 | } 37 | 38 | static vector hex_str_to_hash(std::string inputString) { 39 | std::vector hash; 40 | int size = inputString.size() / 2; 41 | for (int i = 0; i < size; i++) { 42 | std::string str2 = inputString.substr(i * 2, 2); 43 | if (str2.empty()) { 44 | continue; 45 | } 46 | 47 | unsigned int value = 0; 48 | std::stringstream SS(str2); 49 | SS >> std::hex >> value; 50 | for (int j = 0; j < 8; j++) { 51 | bool check = !!((value >> j) & 1); 52 | hash.push_back(check); 53 | } 54 | } 55 | return hash; 56 | } 57 | 58 | static std::vector matHashToBoolArr(cv::Mat const inHash) { 59 | const unsigned char *data = inHash.data; 60 | std::vector v; 61 | for (int i = 0; i < 8; i++) { 62 | unsigned char c = data[i]; 63 | for (int j = 0; j < 8; j++) { 64 | int shift = (8 - j) - 1; 65 | bool val = ((c >> shift) & 1); 66 | v.push_back(val); 67 | } 68 | } 69 | return v; 70 | } 71 | 72 | static vector computeHash(cv::Mat const input) { 73 | cv::Mat inHash; 74 | auto algo = cv::img_hash::PHash_Fast(); 75 | algo.compute(input, inHash); 76 | return matHashToBoolArr(inHash); 77 | } 78 | 
79 | //returns hamming distance 80 | static int getHashDistance(const FragmentHash> &first, const FragmentHash> &second) { 81 | const vector hash1 = first.getHash(); 82 | const vector hash2 = second.getHash(); 83 | assert(hash1.size() == hash2.size()); 84 | 85 | int dist = 0; 86 | for (unsigned int i = 0; i < hash1.size(); i++) { 87 | dist += (hash1[i] != hash2[i]); 88 | } 89 | return dist; 90 | } 91 | 92 | public: 93 | 94 | PerceptualHash_Fast() 95 | {} 96 | 97 | PerceptualHash_Fast(ShapeAndPositionInvariantImage frag): 98 | FragmentHash>(frag) 99 | { 100 | hash_ = computeHash(frag.getImageData()); 101 | } 102 | 103 | PerceptualHash_Fast(string getHashFromString, std::vector shape=vector()): 104 | FragmentHash>(getHashFromString, shape) 105 | { 106 | hash_ = hex_str_to_hash(getHashFromString); 107 | } 108 | 109 | PerceptualHash_Fast(const PerceptualHash_Fast& that) : 110 | FragmentHash(that) 111 | {} 112 | 113 | string toString() override 114 | { 115 | return convertHashToString(hash_); 116 | } 117 | 118 | int getHammingDistance(const FragmentHash>& inHash){ 119 | return getHashDistance(*this, inHash); 120 | } 121 | 122 | 123 | }; 124 | 125 | }//end of namespace 126 | #endif // perceptual_fast_hash_h 127 | 128 | 129 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/ShapeAndPositionInvariantImage.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | #include "Keypoint.h" 7 | 8 | class ShapeAndPositionInvariantImage 9 | { 10 | public: 11 | ShapeAndPositionInvariantImage(const std::string imageName, const cv::Mat imageData, const std::vector shape, const std::string imageFullPath); 12 | 13 | const std::string& getImageName() const { return imageName_; } 14 | cv::Mat getImageData() const { return imageData_; } 15 | const std::vector& getShape() const { return shape_; } 16 | const std::string& getImageFullPath() const { 
return imageFullPath_; } 17 | 18 | private: 19 | const std::string imageName_; 20 | const cv::Mat imageData_; 21 | const std::vector shape_; 22 | const std::string imageFullPath_; 23 | }; 24 | 25 | inline ShapeAndPositionInvariantImage::ShapeAndPositionInvariantImage(const std::string imageName, const cv::Mat imageData, const std::vector shape, const std::string imageFullPath) 26 | : imageName_(imageName), 27 | imageData_(imageData), 28 | shape_(shape), 29 | imageFullPath_(imageFullPath) {} -------------------------------------------------------------------------------- /fullEndToEndDemo/src/Triangle.h: -------------------------------------------------------------------------------- 1 | #ifndef SRC_TRIANGLE_H 2 | #define SRC_TRIANGLE_H 3 | #include "Keypoint.h" 4 | 5 | using namespace std; 6 | 7 | class Triangle 8 | { 9 | public: 10 | 11 | inline Triangle(){ 12 | } 13 | 14 | inline Triangle(Keypoint one, Keypoint two, Keypoint three){ 15 | keypoints_[0] = one; 16 | keypoints_[1] = two; 17 | keypoints_[2] = three; 18 | } 19 | 20 | inline Triangle(vector list){ 21 | keypoints_[0] = list[0]; 22 | keypoints_[1] = list[1]; 23 | keypoints_[2] = list[2]; 24 | } 25 | Keypoint keypoints_[3]; 26 | 27 | std::vector toKeypoints() const 28 | { 29 | std::vector ret; 30 | for (int i = 0; i < 3; i++) 31 | { 32 | ret.push_back(keypoints_[i]); 33 | } 34 | return ret; 35 | } 36 | 37 | string toString() const 38 | { 39 | string ret = ""; 40 | for (auto kp: keypoints_) 41 | { 42 | ret += kp.toString() + ", "; 43 | } 44 | return ret; 45 | } 46 | 47 | double calcArea() { 48 | double x0 = keypoints_[0].x; 49 | double x1 = keypoints_[1].x; 50 | double x2 = keypoints_[2].x; 51 | 52 | double y0 = keypoints_[0].y; 53 | double y1 = keypoints_[1].y; 54 | double y2 = keypoints_[2].y; 55 | 56 | return std::abs(x0 * (y1 - y2) + x1 * (y2 - y0) + x2 * (y0 - y1)) / 2; 57 | } 58 | 59 | bool hasMatchingKeypoint(Keypoint checkingKeypoint, const Triangle &triangle) const { 60 | for (auto kp: 
triangle.toKeypoints()) 61 | { 62 | if (checkingKeypoint == kp) { 63 | return true; 64 | } 65 | } 66 | return false; 67 | } 68 | 69 | inline bool operator==(const Triangle& rhs) const { 70 | for (auto kp: keypoints_) 71 | { 72 | if(hasMatchingKeypoint(kp, rhs)) 73 | { 74 | //check next keypoint 75 | }else{ 76 | return false; 77 | } 78 | } 79 | return true; 80 | } 81 | }; 82 | 83 | namespace std { 84 | 85 | template <> 86 | struct hash 87 | { 88 | std::size_t operator()(const Triangle& k) const 89 | { 90 | using std::hash; 91 | auto kps = k.toKeypoints(); 92 | return ((hash()(kps[0]) ^ (hash()(kps[1]) << 1)) >> 1 ^ hash()(kps[2])); 93 | } 94 | }; 95 | 96 | } 97 | 98 | 99 | 100 | #endif //SRC_TRIANGLE_H -------------------------------------------------------------------------------- /fullEndToEndDemo/src/curvature.py: -------------------------------------------------------------------------------- 1 | """This file takes in a function (represented as a number of points and approximated using a spline) 2 | and returns the local maximums of curvature. 
3 | """ 4 | 5 | 6 | import numpy as np 7 | from numpy import sin,pi,linspace 8 | from scipy.interpolate import UnivariateSpline, interp1d 9 | from scipy.integrate import quad, cumtrapz, quad_explain 10 | from scipy.signal import argrelextrema 11 | 12 | 13 | g_SmoothingForParameterization_t = None 14 | g_SmoothingForParameterization_s = None 15 | g_SmoothingForDeltaCurvature = None 16 | 17 | 18 | def arcLengthAllTheWayToT(tList, fx_t, fy_t, noOfPoints=100, subDivide=1): 19 | t = np.arange(len(tList)) * subDivide 20 | dfx = fx_t.derivative(1) 21 | dfy = fy_t.derivative(1) 22 | y_vals = np.sqrt(dfx(t) ** 2 + dfy(t) ** 2) 23 | 24 | return cumtrapz(y_vals, tList, initial=0) 25 | 26 | def convertTListToArcLengthList(tList, fx_t, fy_t): 27 | return arcLengthAllTheWayToT(tList, fx_t, fy_t, noOfPoints=len(tList)) 28 | 29 | def getParameterizedFunctionFromPoints(tList, x_pts, y_pts, smoothing=None): 30 | fx_t = UnivariateSpline(tList, x_pts, k=3, s=smoothing) 31 | fy_t = UnivariateSpline(tList, y_pts, k=3, s=smoothing) 32 | return fx_t, fy_t 33 | 34 | def reParameterizeFunctionFromPoints(tList, fx_t, fy_t, smoothing=None): 35 | #for each point (inputX[i], inputY[i]) the "arcLengthList" gives use the arc length from 0 to that point 36 | arcLengthList = convertTListToArcLengthList(tList, fx_t, fy_t) 37 | 38 | fx_s, fy_s = getParameterizedFunctionFromPoints(arcLengthList, fx_t(tList), fy_t(tList), smoothing=smoothing) 39 | return arcLengthList, fx_s, fy_s 40 | 41 | def getFirstAndSecondDerivForTPoints(arcLengthList, fx_s, fy_s): 42 | x = fx_s(arcLengthList) 43 | x_ = fx_s.derivative(1)(arcLengthList) 44 | x__ = fx_s.derivative(2)(arcLengthList) 45 | 46 | y = fy_s(arcLengthList) 47 | y_ = fy_s.derivative(1)(arcLengthList) 48 | y__ = fy_s.derivative(2)(arcLengthList) 49 | return x, x_, x__, y, y_, y__ 50 | 51 | #Note: curvature points won't be equidistant if the arcLengthList isn't 52 | def calculateCurvature(arcLengthList, fx_s, fy_s, smoothing=None): 53 | x, x_, x__, y, y_, y__ 
= getFirstAndSecondDerivForTPoints(arcLengthList, fx_s, fy_s) 54 | curvature = abs(x_* y__ - y_* x__) / np.power(x_** 2 + y_** 2, 3 / 2) 55 | fCurvature = UnivariateSpline(arcLengthList, curvature, s=smoothing) 56 | return curvature 57 | 58 | def parameterizeFunctionWRTArcLength(inputX, inputY): 59 | 60 | tList = np.arange(inputX.shape[0]) 61 | fx_t, fy_t = getParameterizedFunctionFromPoints(tList, inputX, inputY, smoothing=g_SmoothingForParameterization_t) 62 | 63 | arcLengthList, fx_s, fy_s = reParameterizeFunctionFromPoints(tList, fx_t, fy_t, smoothing=g_SmoothingForParameterization_s) 64 | 65 | curvature = calculateCurvature(arcLengthList, fx_s, fy_s, smoothing=g_SmoothingForDeltaCurvature) 66 | 67 | return inputX, inputY, curvature 68 | 69 | #Get the local maximums of curvature 70 | def getLocalMaximumsOfCurvature(pts, numberOfPixelsPerUnit=1): 71 | inputX, inputY = pts[:, 0], pts[:, 1] 72 | 73 | #set the scale 74 | inputX = np.multiply(inputX, 1./float(numberOfPixelsPerUnit)) 75 | inputY = np.multiply(inputY, 1./float(numberOfPixelsPerUnit)) 76 | 77 | #parameterize the function W.R.T. arc length 78 | xs, ys, curvature = parameterizeFunctionWRTArcLength(inputX, inputY) 79 | 80 | #get the local maximums of curvature 81 | localMaxima = argrelextrema(curvature, np.greater, order=2) 82 | 83 | localMaximaIndexes = localMaxima[0] 84 | xsMaxima = xs[localMaximaIndexes] 85 | ysMaxima = ys[localMaximaIndexes] 86 | 87 | fin_pts = [] 88 | for i in range(len(xsMaxima)): 89 | pt = (xsMaxima[i], ysMaxima[i]) 90 | fin_pts.append(pt) 91 | 92 | return [xsMaxima], [ysMaxima] 93 | 94 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/dumpKeypointsToJson.py: -------------------------------------------------------------------------------- 1 | """This file gets 2D affine transformation-invariant keypoints for a given image. 2 | It's really just a bunch of hacks thrown together that works well enough for the proof of concept. 
3 | The whole proof of concept would be drastically better with a well designed 2D affine 4 | transformation-invariant keypointing algorithm. 5 | """ 6 | 7 | from __future__ import division 8 | 9 | import numpy as np 10 | import cv2 11 | import curvature 12 | import sys 13 | 14 | 15 | 16 | 17 | def computeKeypoints(img): 18 | gaussW = 21 19 | img = recolour(img, gaussW) 20 | b, g, r = cv2.split(img) 21 | points1 = [] 22 | points1.extend(computeKeypoints_internal(b)) 23 | #points1.extend(computeKeypoints_internal(g)) 24 | #points1.extend(computeKeypoints_internal(r)) 25 | return points1 26 | 27 | 28 | def computeKeypoints_internal(singleChannelImage): 29 | img = singleChannelImage 30 | ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY) 31 | 32 | img2 = img.copy() 33 | contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 34 | 35 | finCnts = [] 36 | area_here = 400 37 | area_here_max = 600 38 | for cnt in contours: 39 | if cv2.contourArea(cnt) > area_here: 40 | finCnts.append(cnt) 41 | 42 | contours = finCnts 43 | 44 | finCnts = [] 45 | for cnt in contours: 46 | M = cv2.moments(cnt) 47 | cX = int(M["m10"] / M["m00"]) 48 | cY = int(M["m01"] / M["m00"]) 49 | finCnts.append( (cX, cY) ) 50 | 51 | for i in range(len(contours)): 52 | cnt = contours[i] 53 | ret = [] 54 | for pnt in cnt: 55 | pt = pnt[0] 56 | ret.append( (pt[0], pt[1]) ) 57 | 58 | xcoords, ycoords = curvature.getLocalMaximumsOfCurvature( np.array(ret) ) 59 | 60 | for i in range(len(xcoords[0])): 61 | xs = xcoords[0][i] 62 | ys = ycoords[0][i] 63 | finCnts.append( (xs, ys) ) 64 | 65 | return finCnts 66 | 67 | 68 | g_pixelVals = [16, 124, 115, 68, 98, 176, 225, 69 | 55, 50, 53, 129, 19, 57, 160, 143, 237, 75, 164, 70 | 206, 167, 103, 140, 90, 112, 244, 240, 107, 202, 185, 71 | 72, 71, 109, 74, 183, 205, 46, 121, 180, 142, 126, 38, 247, 72 | 166, 144, 67, 134, 194, 198, 23, 186, 33, 163, 24, 117, 37, 73 | 76, 147, 47, 52, 42, 70, 108, 30, 54, 89, 59, 73, 91, 151, 74 | 6, 173, 86, 
182, 178, 10, 207, 171, 13, 77, 88, 159, 125, 75 | 11, 188, 238, 41, 92, 118, 201, 132, 48, 28, 195, 17, 119, 64, 76 | 25, 45, 114, 80, 187, 105, 204, 158, 20, 169, 83, 191, 199, 234, 77 | 136, 81, 252, 141, 242, 219, 138, 161, 154, 135, 63, 153, 239, 130, 223, 249, 122, 93, 216, 127, 78 | 111, 15, 12, 8, 44, 193, 245, 0, 235, 120, 31, 79 | 165, 3, 155, 43, 26, 152, 94, 29, 232, 35, 218, 230, 233, 214, 217, 7, 156, 189, 228, 80 | 137, 209, 145, 226, 97, 215, 170, 51, 224, 100, 61, 69, 250, 4, 34, 56, 255, 60, 84, 110, 203, 81 | 222, 133, 248, 106, 212, 87, 253, 208, 101, 116, 251, 190, 99, 32, 113, 157, 27, 79, 82, 146, 149, 82 | 5, 210, 65, 22, 181, 131, 62, 36, 184, 196, 231, 192, 66, 213, 2, 254, 174, 211, 236, 229, 58, 221, 21, 83 | 150, 123, 175, 177, 179, 246, 96, 227, 1, 18, 241, 49, 128, 78, 40, 14, 162, 85, 39, 172, 104, 84 | 9, 200, 220, 139, 168, 95, 243, 197, 148, 102] 85 | 86 | 87 | def recolour(img, gaussW=41): 88 | pixel_vals = np.array(g_pixelVals, dtype=img.dtype) 89 | div = 40 90 | size = div * (len(pixel_vals) // div) 91 | 92 | for i in range(0, size, div): 93 | pixel_vals[i:i + div] = pixel_vals[i] 94 | 95 | pixel_vals[size:] = pixel_vals[size] 96 | 97 | img = cv2.GaussianBlur(img, (gaussW, gaussW), 0) 98 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 99 | vals = pixel_vals[gray] 100 | 101 | # v = vals[i, j] 102 | # img[i, j, 2 - v % 3] = v 103 | m = np.fliplr(np.identity(3, dtype=img.dtype)) 104 | img = m[vals % 3] * vals[:, :, np.newaxis] 105 | 106 | return img 107 | 108 | 109 | def dumpKeypoints(img, filename): 110 | import json 111 | kps = computeKeypoints(img) 112 | calcdKeypoints = [] 113 | for kp in kps: 114 | tempObj = {} 115 | tempObj["x"] = kp[0] 116 | tempObj["y"] = kp[1] 117 | calcdKeypoints.append(tempObj) 118 | keyPoints = {} 119 | keyPoints['keypoints'] = calcdKeypoints 120 | output = {} 121 | output['output'] = keyPoints 122 | 123 | f = open(filename,'w+') 124 | f.write( json.dumps(output) ) 125 | 126 | 127 | def main(): 128 | 
import os 129 | from shutil import copyfile 130 | import sys 131 | if len(sys.argv) < 3: 132 | print("you need to pass in an image path!!!! and also an output path for the json") 133 | return -1 134 | 135 | img = cv2.imread(sys.argv[1]) 136 | dumpKeypoints(img, sys.argv[2]) 137 | 138 | if __name__ == '__main__': 139 | main() 140 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/PHash_Fast.cpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 
28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 39 | // 40 | //M*/ 41 | 42 | #include "precomp.hpp" 43 | 44 | #include 45 | #include 46 | 47 | namespace cv{ 48 | 49 | namespace img_hash{ 50 | 51 | PHash_Fast::~PHash_Fast() 52 | { 53 | 54 | } 55 | 56 | void PHash_Fast::compute(cv::InputArray inputArr, 57 | cv::OutputArray outputArr) 58 | { 59 | cv::Mat const input = inputArr.getMat(); 60 | CV_Assert(input.type() == CV_8UC4 || 61 | input.type() == CV_8UC3 || 62 | input.type() == CV_8U); 63 | 64 | cv::resize(input, resizeImg, cv::Size(32,32)); 65 | if(input.type() == CV_8UC3) 66 | { 67 | cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY); 68 | } 69 | else if(input.type() == CV_8UC4) 70 | { 71 | cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY); 72 | } 73 | else 74 | { 75 | grayImg = resizeImg; 76 | } 77 | 78 | grayImg.convertTo(grayFImg, CV_32F); 79 | cv::dct(grayFImg, dctImg); 80 | dctImg(cv::Rect(0, 0, 8, 8)).copyTo(topLeftDCT); 81 | topLeftDCT.at(0, 0) = 0; 82 | float const imgMean = static_cast(cv::mean(topLeftDCT)[0]); 83 | 84 | cv::compare(topLeftDCT, imgMean, bitsImg, CMP_GT); 85 | bitsImg /= 255; 86 | outputArr.create(1, 8, CV_8U); 87 | cv::Mat hash = outputArr.getMat(); 88 | uchar 
*hash_ptr = hash.ptr(0); 89 | uchar const *bits_ptr = bitsImg.ptr(0); 90 | std::bitset<8> bits; 91 | for(size_t i = 0, j = 0; i != bitsImg.total(); ++j) 92 | { 93 | for(size_t k = 0; k != 8; ++k) 94 | { 95 | //avoid warning C4800, casting do not work 96 | bits[k] = bits_ptr[i++] != 0; 97 | } 98 | hash_ptr[j] = static_cast(bits.to_ulong()); 99 | } 100 | } 101 | 102 | double PHash_Fast::compare(cv::InputArray hashOne, 103 | cv::InputArray hashTwo) const 104 | { 105 | return norm(hashOne, hashTwo, NORM_HAMMING); 106 | } 107 | 108 | Ptr PHash_Fast::create() 109 | { 110 | return Ptr(); 111 | } 112 | 113 | String PHash_Fast::getDefaultName() const 114 | { 115 | return "PHash_Fast"; 116 | } 117 | 118 | void pHash_Fast(cv::InputArray inputArr, 119 | cv::OutputArray outputArr) 120 | { 121 | PHash_Fast().compute(inputArr, outputArr); 122 | } 123 | 124 | } 125 | 126 | } 127 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/PHash_Fast.h: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 
15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_PHASH_FAST_HPP__ 43 | #define __OPENCV_PHASH_FAST_HPP__ 44 | 45 | #include 46 | #include "img_hash_base.hpp" 47 | 48 | namespace cv 49 | { 50 | 51 | namespace img_hash 52 | { 53 | //! @addtogroup p_hash 54 | //! 
@{ 55 | 56 | /** @brief Computes pHash value of the input image 57 | @param inputArr input image want to compute hash value, 58 | type should be CV_8UC4, CV_8UC3, CV_8UC1. 59 | @param outputArr Hash value of input, it will contain 8 uchar value 60 | */ 61 | CV_EXPORTS_W void pHash_Fast(cv::InputArray inputArr, 62 | cv::OutputArray outputArr); 63 | 64 | class CV_EXPORTS_W PHash_Fast : public ImgHashBase 65 | { 66 | public: 67 | CV_WRAP ~PHash_Fast(); 68 | 69 | /** @brief Computes PHash_Fast of the input image 70 | @param inputArr input image want to compute hash value, 71 | type should be CV_8UC4, CV_8UC3, CV_8UC1. 72 | @param outputArr hash of the image 73 | */ 74 | CV_WRAP virtual void compute(cv::InputArray inputArr, 75 | cv::OutputArray outputArr); 76 | 77 | /** @brief Compare the hash value between inOne and inTwo 78 | @param hashOne Hash value one 79 | @param hashTwo Hash value two 80 | @return zero means the images are likely very similar; 81 | 5 means a few things maybe different; 10 or more means 82 | they maybe are very different image 83 | */ 84 | CV_WRAP virtual double compare(cv::InputArray hashOne, 85 | cv::InputArray hashTwo) const; 86 | 87 | CV_WRAP static Ptr create(); 88 | 89 | /** Returns the algorithm string identifier.*/ 90 | CV_WRAP virtual String getDefaultName() const; 91 | 92 | private: 93 | cv::Mat bitsImg; 94 | cv::Mat dctImg; 95 | cv::Mat grayFImg; 96 | cv::Mat grayImg; 97 | cv::Mat resizeImg; 98 | cv::Mat topLeftDCT; 99 | }; 100 | 101 | //! @} 102 | } 103 | } 104 | 105 | #endif // __OPENCV_PHASH_FAST_HPP__ 106 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/img_hash.hpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 
32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_IMG_HASH_H__ 43 | #define __OPENCV_IMG_HASH_H__ 44 | 45 | #include "img_hash_base.hpp" 46 | //#include "average_hash.hpp" 47 | //#include "block_mean_hash.hpp" 48 | //#include "color_moment_hash.hpp" 49 | //#include "marr_hildreth_hash.hpp" 50 | #include "phash.hpp" 51 | #include "PHash_Fast.h" 52 | ////#include "opencv2/img_hash/radial_variance_hash.hpp" 53 | 54 | /** 55 | @defgroup img_hash Provide algorithms to extract the hash of images and fast way to figure out most similar images in huge data set 56 | 57 | Namespace for all functions is **img_hash**. The module brings implementations of different image hashing. 
58 | 59 | @{ 60 | @defgroup avg_hash Simple and fast perceptual hash algorithm 61 | 62 | This is a fast image hashing algorithm, but only work on simple case.For more details, please 63 | refer to http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html 64 | 65 | @defgroup p_hash Slower than average_hash, but tolerant of minor modifications 66 | 67 | This algorithm can combat more variation than averageHash, for more details please refer to 68 | http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html 69 | 70 | @defgroup marr_hash Marr-Hildreth Operator Based Hash, slowest but more discriminative 71 | http://www.phash.org/docs/pubs/thesis_zauner.pdf 72 | 73 | @defgroup radial_var_hash Image hash based on Radon transform. 74 | http://www.phash.org/docs/pubs/thesis_zauner.pdf 75 | 76 | @defgroup block_mean_hash Image hash based on block mean. 77 | http://www.phash.org/docs/pubs/thesis_zauner.pdf 78 | 79 | @defgroup color_moment_hash Image hash based on color moments. 80 | http://www.naturalspublishing.com/files/published/54515x71g3omq1.pdf 81 | @} 82 | 83 | */ 84 | 85 | #endif // __OPENCV_IMG_HASH_H__ 86 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/img_hash_base.hpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 
8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 
39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_IMG_HASH_BASE_HPP__ 43 | #define __OPENCV_IMG_HASH_BASE_HPP__ 44 | 45 | #include 46 | 47 | namespace cv 48 | { 49 | 50 | namespace img_hash 51 | { 52 | 53 | //! @addtogroup ihash 54 | //! @{ 55 | /**@brief The base class for image hash algorithms 56 | */ 57 | class ImgHashBase : public Algorithm 58 | { 59 | public: 60 | /** @brief Computes hash of the input image 61 | @param inputArr input image want to compute hash value 62 | @param outputArr hash of the image 63 | */ 64 | CV_EXPORTS virtual void compute(cv::InputArray inputArr, 65 | cv::OutputArray outputArr) = 0; 66 | 67 | /** @brief Compare the hash value between inOne and inTwo 68 | @param hashOne Hash value one 69 | @param hashTwo Hash value two 70 | @return value indicate similarity between inOne and inTwo, the meaning 71 | of the value vary from algorithms to algorithms 72 | */ 73 | CV_EXPORTS virtual double compare(cv::InputArray hashOne, 74 | cv::InputArray hashTwo) const = 0; 75 | 76 | /** Returns the algorithm string identifier.*/ 77 | CV_WRAP virtual String getDefaultName() const = 0; 78 | }; 79 | //! @} 80 | }//ihash 81 | }//cv 82 | 83 | #endif // __OPENCV_IMG_HASH_BASE_HPP__ 84 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/phash.cpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 
8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 
39 | // 40 | //M*/ 41 | 42 | #include "precomp.hpp" 43 | 44 | #include 45 | #include 46 | 47 | namespace cv{ 48 | 49 | namespace img_hash{ 50 | 51 | PHash::~PHash() 52 | { 53 | 54 | } 55 | 56 | void PHash::compute(cv::InputArray inputArr, 57 | cv::OutputArray outputArr) 58 | { 59 | cv::Mat const input = inputArr.getMat(); 60 | CV_Assert(input.type() == CV_8UC4 || 61 | input.type() == CV_8UC3 || 62 | input.type() == CV_8U); 63 | 64 | cv::resize(input, resizeImg, cv::Size(32,32)); 65 | if(input.type() == CV_8UC3) 66 | { 67 | cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY); 68 | } 69 | else if(input.type() == CV_8UC4) 70 | { 71 | cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY); 72 | } 73 | else 74 | { 75 | grayImg = resizeImg; 76 | } 77 | 78 | grayImg.convertTo(grayFImg, CV_32F); 79 | cv::dct(grayFImg, dctImg); 80 | dctImg(cv::Rect(0, 0, 8, 8)).copyTo(topLeftDCT); 81 | topLeftDCT.at(0, 0) = 0; 82 | float const imgMean = static_cast(cv::mean(topLeftDCT)[0]); 83 | 84 | cv::compare(topLeftDCT, imgMean, bitsImg, CMP_GT); 85 | bitsImg /= 255; 86 | outputArr.create(1, 8, CV_8U); 87 | cv::Mat hash = outputArr.getMat(); 88 | uchar *hash_ptr = hash.ptr(0); 89 | uchar const *bits_ptr = bitsImg.ptr(0); 90 | std::bitset<8> bits; 91 | for(size_t i = 0, j = 0; i != bitsImg.total(); ++j) 92 | { 93 | for(size_t k = 0; k != 8; ++k) 94 | { 95 | //avoid warning C4800, casting do not work 96 | bits[k] = bits_ptr[i++] != 0; 97 | } 98 | hash_ptr[j] = static_cast(bits.to_ulong()); 99 | } 100 | } 101 | 102 | double PHash::compare(cv::InputArray hashOne, 103 | cv::InputArray hashTwo) const 104 | { 105 | return norm(hashOne, hashTwo, NORM_HAMMING); 106 | } 107 | 108 | Ptr PHash::create() 109 | { 110 | return Ptr(); 111 | } 112 | 113 | String PHash::getDefaultName() const 114 | { 115 | return "PHash"; 116 | } 117 | 118 | void pHash(cv::InputArray inputArr, 119 | cv::OutputArray outputArr) 120 | { 121 | PHash().compute(inputArr, outputArr); 122 | } 123 | 124 | } 125 | 126 | } 127 | 
-------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/phash.hpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 
32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_PHASH_HPP__ 43 | #define __OPENCV_PHASH_HPP__ 44 | 45 | #include 46 | #include "img_hash_base.hpp" 47 | 48 | namespace cv 49 | { 50 | 51 | namespace img_hash 52 | { 53 | //! @addtogroup p_hash 54 | //! @{ 55 | 56 | /** @brief Computes pHash value of the input image 57 | @param inputArr input image want to compute hash value, 58 | type should be CV_8UC4, CV_8UC3, CV_8UC1. 59 | @param outputArr Hash value of input, it will contain 8 uchar value 60 | */ 61 | CV_EXPORTS_W void pHash(cv::InputArray inputArr, 62 | cv::OutputArray outputArr); 63 | 64 | class CV_EXPORTS_W PHash : public ImgHashBase 65 | { 66 | public: 67 | CV_WRAP ~PHash(); 68 | 69 | /** @brief Computes PHash of the input image 70 | @param inputArr input image want to compute hash value, 71 | type should be CV_8UC4, CV_8UC3, CV_8UC1. 
72 | @param outputArr hash of the image 73 | */ 74 | CV_WRAP virtual void compute(cv::InputArray inputArr, 75 | cv::OutputArray outputArr); 76 | 77 | /** @brief Compare the hash value between inOne and inTwo 78 | @param hashOne Hash value one 79 | @param hashTwo Hash value two 80 | @return zero means the images are likely very similar; 81 | 5 means a few things maybe different; 10 or more means 82 | they maybe are very different image 83 | */ 84 | CV_WRAP virtual double compare(cv::InputArray hashOne, 85 | cv::InputArray hashTwo) const; 86 | 87 | CV_WRAP static Ptr create(); 88 | 89 | /** Returns the algorithm string identifier.*/ 90 | CV_WRAP virtual String getDefaultName() const; 91 | 92 | private: 93 | cv::Mat bitsImg; 94 | cv::Mat dctImg; 95 | cv::Mat grayFImg; 96 | cv::Mat grayImg; 97 | cv::Mat resizeImg; 98 | cv::Mat topLeftDCT; 99 | }; 100 | 101 | //! @} 102 | } 103 | } 104 | 105 | #endif // __OPENCV_PHASH_HPP__ 106 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/precomp.hpp: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 
15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 
39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_PRECOMP_H__ 43 | #define __OPENCV_PRECOMP_H__ 44 | 45 | #include 46 | //#include "opencv2/imgproc.hpp" 47 | #include 48 | #include "img_hash.hpp" 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/img_hash_opencv_module/precomp.hpp~: -------------------------------------------------------------------------------- 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 | // 5 | // By downloading, copying, installing or using the software you agree to this license. 6 | // If you do not agree to this license, do not download, install, 7 | // copy or use the software. 8 | // 9 | // 10 | // License Agreement 11 | // For Open Source Computer Vision Library 12 | // 13 | // Copyright (C) 2015, University of Ostrava, Institute for Research and Applications of Fuzzy Modeling, 14 | // Pavel Vlasanek, all rights reserved. Third party copyrights are property of their respective owners. 15 | // 16 | // Redistribution and use in source and binary forms, with or without modification, 17 | // are permitted provided that the following conditions are met: 18 | // 19 | // * Redistribution's of source code must retain the above copyright notice, 20 | // this list of conditions and the following disclaimer. 21 | // 22 | // * Redistribution's in binary form must reproduce the above copyright notice, 23 | // this list of conditions and the following disclaimer in the documentation 24 | // and/or other materials provided with the distribution. 25 | // 26 | // * The name of the copyright holders may not be used to endorse or promote products 27 | // derived from this software without specific prior written permission. 
28 | // 29 | // This software is provided by the copyright holders and contributors "as is" and 30 | // any express or implied warranties, including, but not limited to, the implied 31 | // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 | // In no event shall the Intel Corporation or contributors be liable for any direct, 33 | // indirect, incidental, special, exemplary, or consequential damages 34 | // (including, but not limited to, procurement of substitute goods or services; 35 | // loss of use, data, or profits; or business interruption) however caused 36 | // and on any theory of liability, whether in contract, strict liability, 37 | // or tort (including negligence or otherwise) arising in any way out of 38 | // the use of this software, even if advised of the possibility of such damage. 39 | // 40 | //M*/ 41 | 42 | #ifndef __OPENCV_PRECOMP_H__ 43 | #define __OPENCV_PRECOMP_H__ 44 | 45 | #include "opencv2/opencv.hpp" 46 | //#include "opencv2/imgproc.hpp" 47 | //#include "opencv2/imgproc/types_c.h" 48 | #include "opencv2/img_hash.hpp" 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/main.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include /* printf, scanf, puts, NULL */ 8 | #include /* srand, rand */ 9 | #include /* time */ 10 | 11 | #include "PerceptualHash.h" 12 | #include "PerceptualHash_Fast.h" 13 | 14 | //#include "FragmentHash.h" 15 | //#include "ShapeAndPositionInvariantImage.h" 16 | #include "Triangle.h" 17 | #include "mainImageProcessingFunctions.hpp" 18 | #include 19 | #include 20 | #include "utils.hpp" 21 | #include "hiredis/hiredis.h" 22 | #include 23 | 24 | using namespace std; 25 | 26 | void addAllHashesToRedis(string imagePath){ 27 | auto loadedImage = getLoadedImage(imagePath); 28 | vector keypoints = 
getKeypoints(loadedImage.getImageData()); 29 | vector tris = buildTrianglesFromKeypoints(keypoints, 50, 400);; 30 | auto hashTrianglePairs = cv::getAllTheHashesForImage(loadedImage, tris); 31 | 32 | redisContext *c; 33 | // redisReply *reply; 34 | const char *hostname = "127.0.0.1"; 35 | int port = 6379; 36 | 37 | struct timeval timeout = { 1, 500000 }; // 1.5 seconds 38 | c = redisConnectWithTimeout(hostname, port, timeout); 39 | if (c == NULL || c->err) { 40 | if (c) { 41 | printf("Connection error: %s\n", c->errstr); 42 | redisFree(c); 43 | } else { 44 | printf("Connection error: can't allocate redis context\n"); 45 | } 46 | exit(1); 47 | } 48 | 49 | int count = 0; 50 | for (auto hashTriangle : hashTrianglePairs) 51 | { 52 | string redisEntry = convertToRedisEntryJson(imagePath, hashTriangle.first); 53 | redisCommand(c,"SADD %s %s", hashTriangle.second.toString().c_str(), redisEntry.c_str()); 54 | 55 | count++; 56 | } 57 | cout << "Added " << count << " image fragments to DB" << endl; 58 | } 59 | 60 | int findMatchingHashInRedis(string imageName){ 61 | auto loadedImage = getLoadedImage(imageName); 62 | vector keypoints = getKeypoints(loadedImage.getImageData()); 63 | vector tris = buildTrianglesFromKeypoints(keypoints, 50, 400);; 64 | auto hashTrianglePairs = cv::getAllTheHashesForImage(loadedImage, tris); 65 | 66 | redisContext *c; 67 | redisReply *reply; 68 | const char *hostname = "127.0.0.1"; 69 | int port = 6379; 70 | 71 | struct timeval timeout = { 1, 500000 }; // 1.5 seconds 72 | c = redisConnectWithTimeout(hostname, port, timeout); 73 | if (c == NULL || c->err) { 74 | if (c) { 75 | printf("Connection error: %s\n", c->errstr); 76 | redisFree(c); 77 | } else { 78 | printf("Connection error: can't allocate redis context\n"); 79 | } 80 | exit(1); 81 | } 82 | // cout << "finished hashing" << endl; 83 | // vector result; 84 | vector result; 85 | // for (auto hash : hashes) 86 | // { 87 | unsigned int batchSize = 1000; 88 | for (unsigned int i = 0; i < 
hashTrianglePairs.size(); i++) 89 | { 90 | unsigned int j = 0; 91 | for(;i < hashTrianglePairs.size() && j < batchSize; j++, i++){ 92 | auto hashTriangle = hashTrianglePairs[i]; 93 | redisAppendCommand(c,"SMEMBERS %s", hashTriangle.second.toString().c_str()); 94 | } 95 | 96 | for(; j > 0; j--){ 97 | redisGetReply(c, (void **) &reply ); 98 | //unsigned int r = redisGetReply(c, (void **) &reply ); 99 | for (unsigned int k = 0; k < reply->elements; k++) 100 | { 101 | string str(reply->element[k]->str); 102 | result.push_back(str); 103 | } 104 | } 105 | 106 | } 107 | std::map> resultMap; 108 | for (auto t_str : result) 109 | { 110 | auto redisReplyImageName = getImageNameFromRedisEntry(t_str); 111 | auto redisReplyTriangle = getTriangleFromRedisEntry(t_str); 112 | resultMap[redisReplyImageName]; 113 | resultMap[redisReplyImageName].push_back(redisReplyTriangle); 114 | } 115 | cout << "Matches:" << endl; 116 | for(auto const& ent1 : resultMap) 117 | { 118 | auto tempImg = cv::imread(ent1.first); 119 | //drawTrianglesOntoImage(ent1.second, tempImg); 120 | //cv::imwrite("./outputImages/outputFromSearch_"+ent1.second[0].toString()+".jpg", tempImg); 121 | cout << ent1.first << ": " << ent1.second.size() << endl; 122 | } 123 | 124 | cout << "Number of matches: " << result.size() << endl; 125 | return result.size(); 126 | } 127 | 128 | 129 | void redisClearDatabase(){ 130 | redisContext *c; 131 | const char *hostname = "127.0.0.1"; 132 | int port = 6379; 133 | 134 | struct timeval timeout = { 1, 500000 }; // 1.5 seconds 135 | c = redisConnectWithTimeout(hostname, port, timeout); 136 | if (c == NULL || c->err) { 137 | if (c) { 138 | printf("Connection error: %s\n", c->errstr); 139 | redisFree(c); 140 | } else { 141 | printf("Connection error: can't allocate redis context\n"); 142 | } 143 | exit(1); 144 | } 145 | redisCommand(c,"FLUSHALL"); 146 | redisFree(c); 147 | } 148 | 149 | void compareTwoImages(string imageName1, string imageName2) { 150 | 151 | //clear the db 152 | 
redisClearDatabase(); 153 | 154 | //add the first image to db 155 | addAllHashesToRedis(imageName1); 156 | 157 | //check for matches using the second image 158 | cout << "{\n\tcount: " << findMatchingHashInRedis(imageName2) << "\n}"; 159 | } 160 | 161 | int main(int argc, char* argv[]) 162 | { 163 | if (argc < 3){ 164 | printf("error: no args!!!\n Example:\nTo insert an image run the following command:\n ./runDemo insert inputImages/cat1.png\nTo query the database with an image run the following command:\n ./runDemo lookup inputImages/cat1.png\n"); 165 | return -1; 166 | } 167 | 168 | 169 | if (argc > 2 && !strcmp(argv[1], "insert")){ 170 | for (int i = 2; i < argc; i++) { 171 | string imageName = argv[i]; 172 | addAllHashesToRedis(imageName); 173 | } 174 | }else if (argc > 2 && !strcmp(argv[1], "lookup")){ 175 | for (int i = 2; i < argc; i++) { 176 | string imageName = argv[i]; 177 | findMatchingHashInRedis(imageName); 178 | } 179 | }else{ 180 | cout << "Bad argument: " << argv[1] << endl; 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/mainImageProcessingFunctions.hpp: -------------------------------------------------------------------------------- 1 | #ifndef mainImageProcessingFunctions_cpp 2 | #define mainImageProcessingFunctions_cpp 3 | 4 | 5 | #include 6 | #include 7 | #include 8 | #include // std::setw 9 | #include /* pow, atan2 */ 10 | 11 | #include "FragmentHash.h" 12 | #include "ShapeAndPositionInvariantImage.h" 13 | #include "Triangle.h" 14 | 15 | #define NUM_OF_ROTATIONS 3 16 | #define HASH_SIZE 8 17 | #define FRAGMENT_WIDTH 60*.86 18 | #define FRAGMENT_HEIGHT 60 19 | #define PI 3.14159265 20 | 21 | const std::vector getTargetTriangle(int scalex, int scaley) 22 | { 23 | std::vector v; 24 | v.push_back(Keypoint(0, 0)); 25 | v.push_back(Keypoint(.5*scalex, 1 * scaley)); 26 | v.push_back(Keypoint(1 * scalex, 0)); 27 | return v; 28 | } 29 | 30 | namespace cv 31 | { 32 | Matx33d 
calcTransformationMatrix(const std::vector& inputTriangle, const std::vector& targetTriangle) 33 | { 34 | /* 35 | * ######CODE BY ROSCA####### 36 | */ 37 | Keypoint target_pt1 = targetTriangle[1]; 38 | Keypoint target_pt2 = targetTriangle[2]; 39 | cv::Matx33d targetPoints(target_pt1.x, target_pt2.x, 0.0, 40 | target_pt1.y, target_pt2.y, 0.0, 41 | 0.0, 0.0, 1.0); 42 | 43 | Keypoint pt2 = Keypoint(inputTriangle[1].x - inputTriangle[0].x, inputTriangle[1].y - inputTriangle[0].y); 44 | Keypoint pt3 = Keypoint(inputTriangle[2].x - inputTriangle[0].x, inputTriangle[2].y - inputTriangle[0].y); 45 | 46 | cv::Matx33d inputPoints(pt2.x, pt3.x, 0.0, 47 | pt2.y, pt3.y, 0.0, 48 | 0.0, 0.0, 1.0); 49 | 50 | cv::Matx33d transpose_m(1.0, 0.0, -inputTriangle[0].x, 51 | 0.0, 1.0, -inputTriangle[0].y, 52 | 0.0, 0.0, 1.0); 53 | 54 | return targetPoints * inputPoints.inv() * transpose_m; 55 | } 56 | 57 | bool isToTheLeftOf(Keypoint pt1, Keypoint pt2) 58 | { 59 | return ((0 - pt1.x)*(pt2.y - pt1.y) - (0 - pt1.y)*(pt2.x - pt1.x)) > 0; 60 | } 61 | 62 | const std::vector prepShapeForCalcOfTransformationMatrix(const std::vector& inputTriangle, const std::vector& targetTriangle) 63 | { 64 | auto pt1 = inputTriangle[0]; 65 | auto pt2 = inputTriangle[1]; 66 | auto pt3 = inputTriangle[2]; 67 | auto pt2_t = Keypoint(pt2.x - pt1.x, pt2.y - pt1.y); 68 | auto pt3_t = Keypoint(pt3.x - pt1.x, pt3.y - pt1.y); 69 | 70 | auto ret = std::vector(); 71 | ret.push_back(pt1); 72 | if (isToTheLeftOf(pt2_t, pt3_t)) 73 | { 74 | ret.push_back(pt2); 75 | ret.push_back(pt3); 76 | } else { 77 | ret.push_back(pt3); 78 | ret.push_back(pt2); 79 | } 80 | return ret; 81 | } 82 | 83 | //@shift: this is used to get every rotation of the triangle we need (3 times, one for each edge of the triangle) 84 | const std::vector prepShapeForCalcOfTransformationMatrixWithShift(const std::vector shape, const std::vector& targetTriangle, int shift) 85 | { 86 | auto shape_cpy = shape; 87 | shift %= shape_cpy.size(); 88 | 
std::rotate(shape_cpy.begin(), shape_cpy.begin() + shift, shape_cpy.end()); 89 | return prepShapeForCalcOfTransformationMatrix(shape_cpy, targetTriangle); 90 | } 91 | 92 | Mat covertToDynamicallyAllocatedMatrix(const Matx33d transformation_matrix) 93 | { 94 | cv::Mat m = cv::Mat::ones(2, 3, CV_64F); 95 | m.at(0, 0) = transformation_matrix(0, 0); 96 | m.at(0, 1) = transformation_matrix(0, 1); 97 | m.at(0, 2) = transformation_matrix(0, 2); 98 | m.at(1, 0) = transformation_matrix(1, 0); 99 | m.at(1, 1) = transformation_matrix(1, 1); 100 | m.at(1, 2) = transformation_matrix(1, 2); 101 | return m; 102 | } 103 | 104 | Mat applyTransformationMatrixToImage(Mat inputImage, const Matx33d transformation_matrix, int outputTriangleSizeX, int outputTriangleSizeY) 105 | { 106 | Mat m = covertToDynamicallyAllocatedMatrix(transformation_matrix); 107 | Mat outputImage(outputTriangleSizeY, outputTriangleSizeX, CV_8UC3, Scalar(0, 0, 0)); 108 | warpAffine(inputImage, outputImage, m, outputImage.size()); 109 | return outputImage; 110 | } 111 | 112 | Matx33d calcTransformationMatrixWithShapePreperation(const std::vector& inputTriangle, const std::vector& targetTriangle, int shift) 113 | { 114 | auto newShape = prepShapeForCalcOfTransformationMatrixWithShift(inputTriangle, targetTriangle, shift); 115 | return calcTransformationMatrix(newShape, targetTriangle); 116 | } 117 | 118 | std::vector normaliseScaleAndRotationForSingleFrag(ShapeAndPositionInvariantImage& fragment) 119 | { 120 | auto shape = fragment.getShape(); 121 | auto ret = std::vector(); 122 | int outputTriangleSizeX = FRAGMENT_WIDTH; 123 | int outputTriangleSizeY = FRAGMENT_HEIGHT; 124 | for (unsigned int i = 0; i < NUM_OF_ROTATIONS; i++) 125 | { 126 | auto transformationMatrix = calcTransformationMatrixWithShapePreperation(shape, getTargetTriangle(outputTriangleSizeX, outputTriangleSizeY), i); 127 | auto input_img = fragment.getImageData(); 128 | auto newImageData = applyTransformationMatrixToImage(input_img, 
transformationMatrix, outputTriangleSizeX, outputTriangleSizeY); 129 | auto t = ShapeAndPositionInvariantImage(fragment.getImageName(), newImageData, shape, fragment.getImageFullPath()); 130 | ret.push_back(t); 131 | } 132 | 133 | return ret; 134 | } 135 | 136 | ShapeAndPositionInvariantImage getFragment(const ShapeAndPositionInvariantImage& input_image, const Triangle& tri) 137 | { 138 | return ShapeAndPositionInvariantImage(input_image.getImageName(), input_image.getImageData(), tri.toKeypoints(), ""); 139 | } 140 | 141 | template std::vector getHashesForFragments(std::vector& normalisedFragments) 142 | { 143 | auto ret = std::vector(); 144 | for (auto frag : normalisedFragments) 145 | { 146 | auto calculatedHash = T(frag); 147 | ret.push_back(calculatedHash); 148 | } 149 | return ret; 150 | } 151 | 152 | template std::vector getHashesForTriangle(ShapeAndPositionInvariantImage& input_image, const Triangle& tri) 153 | { 154 | auto fragment = getFragment(input_image, tri); 155 | auto normalisedFragments = normaliseScaleAndRotationForSingleFrag(fragment); 156 | auto hashes = getHashesForFragments(normalisedFragments); 157 | 158 | return hashes; 159 | } 160 | 161 | template vector> getAllTheHashesForImage(ShapeAndPositionInvariantImage inputImage, std::vector triangles) 162 | { 163 | ShapeAndPositionInvariantImage inputImage2("", inputImage.getImageData(), std::vector(), ""); 164 | 165 | vector> ret(triangles.size()*NUM_OF_ROTATIONS); 166 | 167 | #pragma omp parallel for 168 | for (unsigned int i = 0; i < triangles.size(); i++) 169 | { 170 | auto tri = triangles[i]; 171 | auto hashes = getHashesForTriangle(inputImage2, tri); 172 | 173 | for (unsigned int j = 0; j < 3; j++) 174 | { 175 | ret[(i * 3) + j] = pair(tri, hashes[j]); 176 | } 177 | } 178 | return ret; 179 | } 180 | 181 | }//namespace cv 182 | 183 | #endif//mainImageProcessingFunctions_cpp 184 | -------------------------------------------------------------------------------- /fullEndToEndDemo/src/utils.hpp: 
-------------------------------------------------------------------------------- 1 | #ifndef utils_utils_hpp 2 | #define utils_utils_hpp 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include /* printf, scanf, puts, NULL */ 11 | #include /* srand, rand */ 12 | #include /* time */ 13 | #include 14 | 15 | #include "PerceptualHash.h" 16 | 17 | #include "Triangle.h" 18 | #include "mainImageProcessingFunctions.hpp" 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "utils.hpp" 24 | #include 25 | 26 | using boost::property_tree::ptree; 27 | using boost::property_tree::read_json; 28 | using boost::property_tree::write_json; 29 | 30 | namespace pt = boost::property_tree; 31 | using namespace std; 32 | 33 | static unsigned long x=123456789, y=362436069, z=521288629; 34 | 35 | unsigned long xorshf96(void) { //period 2^96-1 36 | unsigned long t; 37 | x ^= x << 16; 38 | x ^= x >> 5; 39 | x ^= x << 1; 40 | 41 | t = x; 42 | x = y; 43 | y = z; 44 | z = t ^ x ^ y; 45 | 46 | return z; 47 | } 48 | 49 | cv::Mat convertKeypointsVectorToMat(vector kps) 50 | { 51 | cv::Mat ret = cv::Mat::zeros(3, kps.size(), CV_64F); 52 | 53 | for (unsigned int i = 0; i < kps.size(); i++) 54 | { 55 | auto k = kps[i]; 56 | ret.at(0, i) = k.x; 57 | ret.at(1, i) = k.y; 58 | ret.at(2, i) = 1; 59 | } 60 | return ret; 61 | } 62 | 63 | vector convertMatToKeypointsVector(cv::Mat inputPoints) 64 | { 65 | vector ret; 66 | for (unsigned int i = 0; i < (unsigned int) inputPoints.cols; i++) 67 | { 68 | double x = inputPoints.at(0, i); 69 | double y = inputPoints.at(1, i); 70 | Keypoint temp(x, y); 71 | ret.push_back(temp); 72 | } 73 | return ret; 74 | } 75 | 76 | void drawSingleTriangleOntoImage(Triangle tri, cv::Mat inputImage, bool setColour = false, cv::Scalar colourInput = cv::Scalar(0,0,0)){ 77 | auto keypoints = tri.toKeypoints(); 78 | auto prevPoint = keypoints.back(); 79 | 80 | int r = (int) xorshf96(); 81 | int g = (int) xorshf96(); 82 | int b = (int) 
xorshf96(); 83 | for (int i = 0; i < 3; i++){ 84 | auto currentPoint = keypoints[i]; 85 | auto colour = (setColour)? colourInput: cv::Scalar(b,g,r); 86 | 87 | cv::line(inputImage, cv::Point(prevPoint.x, prevPoint.y), cv::Point(currentPoint.x, currentPoint.y), 88 | colour); 89 | //cv::imshow("something", inputImage); 90 | //cv::waitKey(10); 91 | prevPoint = currentPoint; 92 | } 93 | } 94 | 95 | 96 | void drawTrianglesOntoImage(vector tris, cv::Mat inputImage, bool randomColours = true) 97 | { 98 | for (auto tri: tris){ 99 | drawSingleTriangleOntoImage(tri, inputImage, !randomColours); 100 | } 101 | } 102 | 103 | vector applyTransformationMatrixToKeypointVector(vector keypoints, cv::Mat transformationMat) 104 | { 105 | cv::Mat keypointMat = convertKeypointsVectorToMat(keypoints); 106 | cv::Mat transKeypointMat = transformationMat*keypointMat; 107 | return convertMatToKeypointsVector(transKeypointMat); 108 | } 109 | 110 | vector readKeypointsFromJsonFile(string filename); 111 | 112 | //TODO: explain why this sucks 113 | vector getKeypoints(cv::Mat inputImage) 114 | { 115 | cv::imwrite("tempImage.jpg", inputImage); 116 | FILE* file = popen("python ./src/dumpKeypointsToJson.py ./tempImage.jpg ./tempOutputKeypoints.json", "r"); 117 | pclose(file); 118 | return readKeypointsFromJsonFile("./tempOutputKeypoints.json"); 119 | } 120 | 121 | cv::Size calcBoundingRectangleOfShape(cv::Mat shape) 122 | { 123 | //convert Mat to vector of 2d Points 124 | vector convertedMat; 125 | for (int i = 0; i < shape.cols; i++) { 126 | double x = shape.at(i); 127 | double y = shape.at(shape.cols + i); 128 | cv::Point tempPt(x, y); 129 | convertedMat.push_back(tempPt); 130 | } 131 | 132 | auto resultRect = cv::boundingRect(cv::Mat(convertedMat)); 133 | return resultRect.size(); 134 | } 135 | 136 | Triangle getTriangleFromRedisEntry(string redisEntry) 137 | { 138 | pt::ptree root; 139 | std::stringstream ss; 140 | ss << redisEntry; 141 | pt::read_json(ss, root); 142 | 143 | vector keypoints; 
144 | for (auto pt_j : root.get_child("triangle")) 145 | { 146 | double x = pt_j.second.get("x"); 147 | double y = pt_j.second.get("y"); 148 | keypoints.push_back(Keypoint(x, y)); 149 | } 150 | return Triangle(keypoints); 151 | } 152 | 153 | string getImageNameFromRedisEntry(string redisEntry) 154 | { 155 | pt::ptree root; 156 | std::stringstream ss; 157 | ss << redisEntry; 158 | pt::read_json(ss, root); 159 | return root.get("imageName"); 160 | } 161 | 162 | string convertToRedisEntryJson(string imageName, Triangle tri) 163 | { 164 | pt::ptree root; 165 | root.put("imageName", imageName); 166 | 167 | pt::ptree points; 168 | for (auto pt : tri.toKeypoints()) 169 | { 170 | pt::ptree point; 171 | point.put("x", pt.x); 172 | point.put("y", pt.y); 173 | points.push_back(std::make_pair("", point)); 174 | } 175 | root.add_child("triangle", points); 176 | 177 | std::ostringstream buf; 178 | write_json(buf, root, false); 179 | return buf.str(); 180 | } 181 | 182 | 183 | double getKeypointDistance(Keypoint one, Keypoint two) 184 | { 185 | return sqrt(pow(one.x - two.x, 2.0) + pow(one.y - two.y, 2.0)); 186 | } 187 | 188 | vector findKeypointsWithInRangeFromTwoPoints(Keypoint one, Keypoint two, vector otherKeypoints, double lowerThreshold, double upperThreshold) 189 | { 190 | vector result; 191 | for (auto cmpKp : otherKeypoints) 192 | { 193 | double distanceFromPointOne = getKeypointDistance(one, cmpKp); 194 | double distanceFromPointTwo = getKeypointDistance(two, cmpKp); 195 | if (distanceFromPointOne > lowerThreshold && distanceFromPointOne < upperThreshold 196 | && distanceFromPointTwo > lowerThreshold && distanceFromPointTwo < upperThreshold) 197 | { 198 | result.push_back(cmpKp); 199 | } 200 | } 201 | return result; 202 | } 203 | 204 | bool isInKeypointExcludeList(Keypoint keypoint, vector excludeList) { 205 | for (auto kp : excludeList) 206 | { 207 | if (kp.x == keypoint.x && kp.y == keypoint.y){ 208 | return true; 209 | } 210 | } 211 | return false; 212 | } 213 | 214 
| bool shouldPointBeExcluded(Keypoint pt, vector previouslyProcessedPoints, vector currentProcessedPoints, Keypoint currentTopLevelPoint, Keypoint currentSecondLevelPoint) 215 | { 216 | return isInKeypointExcludeList(pt, previouslyProcessedPoints) 217 | || isInKeypointExcludeList(pt, currentProcessedPoints) 218 | || currentTopLevelPoint == pt 219 | || currentSecondLevelPoint == pt 220 | ; 221 | } 222 | 223 | //TODO: explain why this sucks 224 | vector buildTrianglesForSingleKeypoint(Keypoint centerKeypoint, vector otherKeypoints, vector previouslyProcessedPoints, double lowerThreshold, double upperThreshold) 225 | { 226 | vector result; 227 | vector currentProcessedPoints;//a collection of points we have processed since entering this function 228 | for (auto iterKeypoint : otherKeypoints) 229 | { 230 | if (isInKeypointExcludeList(iterKeypoint, previouslyProcessedPoints) || iterKeypoint == centerKeypoint) { 231 | continue; 232 | } 233 | 234 | double distance = getKeypointDistance(iterKeypoint, centerKeypoint); 235 | if (distance > lowerThreshold && distance < upperThreshold) 236 | { 237 | vector finalKeypoints = findKeypointsWithInRangeFromTwoPoints(iterKeypoint, centerKeypoint, otherKeypoints, lowerThreshold, upperThreshold); 238 | for (auto finKp : finalKeypoints) 239 | { 240 | //check if this combination of points will make a triangle we have already created 241 | if (shouldPointBeExcluded(finKp, previouslyProcessedPoints, currentProcessedPoints, centerKeypoint, iterKeypoint)){ 242 | continue; 243 | } 244 | Triangle testingTri(centerKeypoint, iterKeypoint, finKp); 245 | if (testingTri.calcArea() > 1300){ 246 | result.push_back(testingTri); 247 | } 248 | } 249 | } 250 | currentProcessedPoints.push_back(iterKeypoint); 251 | } 252 | return result; 253 | } 254 | 255 | vector buildTrianglesFromKeypoints(vector keypoints, double lowerThreshold = 150, double upperThreshold = 500) 256 | { 257 | vector outputTriangles; 258 | // for (auto keypoint: keypoints) 259 | // { 
260 | //FIXME: this multi-threading needs to be improved 261 | #pragma omp parallel 262 | { 263 | vector vec_private; 264 | #pragma omp for nowait schedule(static) 265 | for (unsigned int i = 0; i < keypoints.size(); i++) 266 | { 267 | vector::const_iterator first = keypoints.begin(); 268 | vector::const_iterator last = keypoints.begin() + i; 269 | vector processedPoints(first, last); 270 | auto keypoint = keypoints[i]; 271 | auto triangles = buildTrianglesForSingleKeypoint(keypoint, keypoints, processedPoints, lowerThreshold, upperThreshold); 272 | for (auto tri : triangles){ 273 | vec_private.push_back(tri); 274 | } 275 | processedPoints.push_back(keypoint); 276 | } 277 | #pragma omp critical 278 | outputTriangles.insert(outputTriangles.end(), vec_private.begin(), vec_private.end()); 279 | } 280 | return outputTriangles; 281 | } 282 | 283 | vector buildTrianglesFromKeypointJsonFile(string filename) 284 | { 285 | vector output = readKeypointsFromJsonFile(filename); 286 | vector ret = buildTrianglesFromKeypoints(output, 50, 400); 287 | return ret; 288 | } 289 | 290 | //TODO: see if we can remove this 291 | vector getTriangles(string filename) 292 | { 293 | return buildTrianglesFromKeypointJsonFile(filename); 294 | } 295 | 296 | 297 | 298 | ShapeAndPositionInvariantImage getLoadedImage(string imageFullPath) 299 | { 300 | cout << "Loading image: " << imageFullPath; 301 | cv::Mat img = cv::imread(imageFullPath); 302 | cout << " ... 
done" << endl; 303 | return ShapeAndPositionInvariantImage("", img, std::vector(), ""); 304 | } 305 | 306 | template 307 | const vector readJsonHashesFile(std::ifstream *file) 308 | { 309 | vector ret; 310 | vector image1OutputTriangles; 311 | vector image2OutputTriangles; 312 | try { 313 | boost::property_tree::ptree pt; 314 | boost::property_tree::read_json(*file, pt); 315 | 316 | for (auto label0 : pt) { 317 | if (label0.first == "output") { 318 | for (auto label1 : label0.second) { 319 | if (label1.first == "imageName") { 320 | //TODO: process the imageName 321 | } 322 | else if (label1.first == "hashes") { 323 | for (auto hash_item : label1.second) { 324 | ret.push_back(T(hash_item.second.get_value())); 325 | } 326 | } 327 | } 328 | } 329 | } 330 | 331 | } 332 | catch (std::exception const &e) { 333 | std::cerr << e.what() << std::endl; 334 | } 335 | 336 | return ret; 337 | } 338 | 339 | template 340 | const vector readJsonHashesFile(const string filename) 341 | { 342 | std::ifstream file(filename); 343 | return readJsonHashesFile(&file); 344 | } 345 | 346 | vector readKeypointsFromJsonFile(std::ifstream *file) 347 | { 348 | vector result; 349 | try { 350 | boost::property_tree::ptree pt; 351 | boost::property_tree::read_json(*file, pt); 352 | 353 | for (auto label0 : pt) { 354 | if (label0.first == "output") { 355 | for (auto label1 : label0.second) { 356 | if (label1.first == "keypoints") { 357 | for (auto kp : label1.second){ 358 | double x, y; 359 | for (auto pt : kp.second){ 360 | if (pt.first == "x"){ 361 | x = pt.second.get_value(); 362 | } 363 | else{ 364 | y = pt.second.get_value(); 365 | } 366 | } 367 | result.push_back(Keypoint(x, y)); 368 | } 369 | } 370 | } 371 | } 372 | } 373 | } 374 | catch (std::exception const &e) { 375 | std::cerr << e.what() << std::endl; 376 | } 377 | return result; 378 | } 379 | 380 | vector readKeypointsFromJsonFile(string filename) 381 | { 382 | std::ifstream file(filename); 383 | return readKeypointsFromJsonFile(&file); 
384 | } 385 | 386 | 387 | #endif//utils_utils_hpp 388 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from setuptools import setup, find_packages 3 | 4 | """ 5 | TODO 6 | - copy or link `python` folder to `transformation_invariant_image_search` 7 | """ 8 | 9 | def readme(): 10 | with open('README.md') as f: 11 | return f.read() 12 | 13 | setup( 14 | name='transformation-invariant-image-search', 15 | version='0.0.1', 16 | description='a reverse image search algorithm which performs 2D affine ' 17 | 'transformation-invariant partial image-matching in sublinear time with ' 18 | 'respect to the number of images in our database.', 19 | long_description=readme(), 20 | long_description_content_type="text/markdown", 21 | author='Tom Murphy', 22 | author_email='murphyt7@tcd.ie', 23 | maintainer='Rachmadani Haryono', 24 | maintainer_email='foreturiga@gmail.com', 25 | license='MIT', 26 | url='https://github.com/pippy360/transformationInvariantImageSearch', 27 | packages=find_packages(), 28 | include_package_data=True, 29 | zip_safe=False, 30 | python_requires='>=3.6', 31 | install_requires=[ 32 | 'hiredis', 33 | 'numpy', 34 | 'redis', 35 | 'scikit-learn', 36 | 'scipy', 37 | 'tqdm>=4.29.1', 38 | ], 39 | entry_points={ 40 | 'console_scripts': [ 41 | 'transformation-invariant-image-search = transformation_invariant_image_search.main:main'] 42 | }, 43 | classifiers=[ 44 | 'Development Status :: 3 - Alpha', 45 | 'Environment :: Console', 46 | 'Environment :: Web Environment', 47 | 'Intended Audience :: Developers', 48 | 'Intended Audience :: End Users/Desktop', 49 | 'License :: OSI Approved :: MIT License', 50 | 'Natural Language :: English', 51 | 'Operating System :: OS Independent', 52 | 'Programming Language :: Python :: 3', 53 | 'Programming Language :: Python :: 3 :: Only', 54 | 'Programming Language :: Python :: 3.5', 55 
| 'Programming Language :: Python :: 3.6', 56 | 'Topic :: Internet :: WWW/HTTP :: Indexing/Search', 57 | 'Topic :: Utilities' 58 | ] 59 | ) 60 | -------------------------------------------------------------------------------- /transformation_invariant_image_search/README.md: -------------------------------------------------------------------------------- 1 | This is a python implementation of the fullEndToEndDemo code. 2 | 3 | $ pip install -r requirements.txt 4 | $ python main.py insert ../fullEndToEndDemo/inputImages/{mona,van_gogh}.jpg 5 | $ python main.py lookup ../fullEndToEndDemo/inputImages/monaComposite.jpg 6 | -------------------------------------------------------------------------------- /transformation_invariant_image_search/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pippy360/transformationInvariantImageSearch/10800ace74441382a41be1a48fe2e01cd8e89a9f/transformation_invariant_image_search/__init__.py -------------------------------------------------------------------------------- /transformation_invariant_image_search/curvature.py: -------------------------------------------------------------------------------- 1 | """This file takes in a function (represented as a number of points and approximated using a spline) 2 | and returns the local maximums of curvature. 
3 | """ 4 | 5 | import numpy as np 6 | from scipy.interpolate import UnivariateSpline 7 | from scipy.integrate import cumtrapz 8 | from scipy.signal import argrelextrema 9 | 10 | 11 | SMOOTHING_PARAMETERIZATION_T = None 12 | SMOOTHING_PARAMETERIZATION_S = None 13 | 14 | 15 | def convert_t_to_arc_length(t_list, fx_t, fy_t, sub_divide=1): 16 | t = np.arange(len(t_list)) * sub_divide 17 | dfx = fx_t.derivative(1) 18 | dfy = fy_t.derivative(1) 19 | y_vals = np.sqrt(dfx(t) ** 2 + dfy(t) ** 2) 20 | 21 | return cumtrapz(y_vals, t_list, initial=0) 22 | 23 | 24 | def get_parameterized_function(t, x_pts, y_pts, smoothing=None): 25 | fx_t = UnivariateSpline(t, x_pts, k=3, s=smoothing) 26 | fy_t = UnivariateSpline(t, y_pts, k=3, s=smoothing) 27 | return fx_t, fy_t 28 | 29 | 30 | def calculate_curvature(arc_length, fx_s, fy_s): 31 | # Note: curvature points won't be equidistant if the arc_length_list isn't 32 | 33 | x_ = fx_s.derivative(1)(arc_length) 34 | x__ = fx_s.derivative(2)(arc_length) 35 | y_ = fy_s.derivative(1)(arc_length) 36 | y__ = fy_s.derivative(2)(arc_length) 37 | 38 | return abs(x_ * y__ - y_ * x__) / np.power(x_ ** 2 + y_ ** 2, 3 / 2) 39 | 40 | 41 | def parameterize_function_wrt_arc_length(x, y): 42 | t = np.arange(x.shape[0]) 43 | fx, fy = get_parameterized_function(t, x, y, SMOOTHING_PARAMETERIZATION_T) 44 | 45 | # for each point (inputX[i], inputY[i]) the "arc_length" 46 | # gives use the arc length from 0 to that point 47 | arc_length = convert_t_to_arc_length(t, fx, fy) 48 | fx_s, fy_s = get_parameterized_function(arc_length, fx(t), fy(t), SMOOTHING_PARAMETERIZATION_S) 49 | 50 | curvature = calculate_curvature(arc_length, fx_s, fy_s) 51 | 52 | return x, y, curvature 53 | 54 | 55 | def local_maxima_of_curvature(pts, number_of_pixels_per_unit=1): 56 | '''Get the local maximums of curvature''' 57 | 58 | # set the scale 59 | input_x = pts[:, 0] / number_of_pixels_per_unit 60 | input_y = pts[:, 1] / number_of_pixels_per_unit 61 | 62 | # parameterize the 
function W.R.T. arc length 63 | xs, ys, curvature = parameterize_function_wrt_arc_length(input_x, input_y) 64 | 65 | # get the local maximums of curvature 66 | local_maxima = argrelextrema(curvature, np.greater, order=2) 67 | 68 | local_maxima_indexes = local_maxima[0] 69 | xs_maxima = xs[local_maxima_indexes] 70 | ys_maxima = ys[local_maxima_indexes] 71 | 72 | # fin_pts += zip(xs_maxima, ys_maxima) 73 | 74 | return xs_maxima, ys_maxima 75 | 76 | -------------------------------------------------------------------------------- /transformation_invariant_image_search/keypoints.py: -------------------------------------------------------------------------------- 1 | """This file gets 2D affine transformation-invariant keypoints for a given image. 2 | It's really just a bunch of hacks thrown together that works well enough for the proof of concept. 3 | The whole proof of concept would be drastically better with a well designed 2D affine 4 | transformation-invariant keypointing algorithm. 5 | """ 6 | 7 | import sys 8 | import json 9 | 10 | import cv2 11 | import numpy as np 12 | 13 | from . 
import curvature 14 | 15 | 16 | PIXEL_VALS = [ 17 | 16, 124, 115, 68, 98, 176, 225, 55, 50, 53, 129, 19, 57, 160, 143, 237, 18 | 75, 164, 206, 167, 103, 140, 90, 112, 244, 240, 107, 202, 185, 72, 71, 19 | 109, 74, 183, 205, 46, 121, 180, 142, 126, 38, 247, 166, 144, 67, 134, 20 | 194, 198, 23, 186, 33, 163, 24, 117, 37, 76, 147, 47, 52, 42, 70, 108, 21 | 30, 54, 89, 59, 73, 91, 151, 6, 173, 86, 182, 178, 10, 207, 171, 13, 77, 22 | 88, 159, 125, 11, 188, 238, 41, 92, 118, 201, 132, 48, 28, 195, 17, 119, 23 | 64, 25, 45, 114, 80, 187, 105, 204, 158, 20, 169, 83, 191, 199, 234, 136, 24 | 81, 252, 141, 242, 219, 138, 161, 154, 135, 63, 153, 239, 130, 223, 249, 25 | 122, 93, 216, 127, 111, 15, 12, 8, 44, 193, 245, 0, 235, 120, 31, 165, 3, 26 | 155, 43, 26, 152, 94, 29, 232, 35, 218, 230, 233, 214, 217, 7, 156, 189, 27 | 228, 137, 209, 145, 226, 97, 215, 170, 51, 224, 100, 61, 69, 250, 4, 34, 28 | 56, 255, 60, 84, 110, 203, 222, 133, 248, 106, 212, 87, 253, 208, 101, 116, 29 | 251, 190, 99, 32, 113, 157, 27, 79, 82, 146, 149, 5, 210, 65, 22, 181, 131, 30 | 62, 36, 184, 196, 231, 192, 66, 213, 2, 254, 174, 211, 236, 229, 58, 221, 31 | 21, 150, 123, 175, 177, 179, 246, 96, 227, 1, 18, 241, 49, 128, 78, 40, 32 | 14, 162, 85, 39, 172, 104, 9, 200, 220, 139, 168, 95, 243, 197, 148, 102 33 | ] 34 | 35 | 36 | def recolour(img, gauss_width=41): 37 | pixel_vals = np.array(PIXEL_VALS, dtype=img.dtype) 38 | div = 40 39 | size = div * (len(pixel_vals) // div) 40 | 41 | for i in range(0, size, div): 42 | pixel_vals[i:i + div] = pixel_vals[i] 43 | 44 | pixel_vals[size:] = pixel_vals[size] 45 | 46 | img = cv2.GaussianBlur(img, (gauss_width, gauss_width), 0) 47 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 48 | vals = pixel_vals[gray] 49 | 50 | # v = vals[i, j] 51 | # img[i, j, 2 - v % 3] = v 52 | m = np.fliplr(np.identity(3, dtype=img.dtype)) 53 | img = m[vals % 3] * vals[:, :, np.newaxis] 54 | 55 | return img 56 | 57 | 58 | def compute_keypoints(img): 59 | gauss_width = 21 60 | img = 
recolour(img, gauss_width) 61 | b, _, _ = cv2.split(img) 62 | 63 | points = compute_keypoints_internal(b) 64 | # points.extend(compute_keypoints_internal(g)) 65 | # points.extend(compute_keypoints_internal(r)) 66 | 67 | return points 68 | 69 | 70 | def find_contours(*args, **kwargs): 71 | # opencv 2: contours, hierarchy 72 | # opencv 3: image, contours, hierarchy 73 | # opencv 4: contours, hierarchy 74 | # https://docs.opencv.org/4.0.0/d3/dc0/group__imgproc__shape.html 75 | r = cv2.findContours(*args, **kwargs) 76 | return r['4' > cv2.__version__ >= '3'] 77 | 78 | 79 | def compute_keypoints_internal(single_channel_image): 80 | ret, img = cv2.threshold(single_channel_image, 127, 255, cv2.THRESH_BINARY) 81 | contours = find_contours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) 82 | 83 | area_here = 400 84 | contours = [c for c in contours if cv2.contourArea(c) > area_here] 85 | 86 | fin_contours = [] 87 | 88 | for cnt in contours: 89 | M = cv2.moments(cnt) 90 | c_x = int(M["m10"] / M["m00"]) 91 | c_y = int(M["m01"] / M["m00"]) 92 | fin_contours.append((c_x, c_y)) 93 | 94 | for cnt in contours: 95 | ret = np.array([(pt[0][0], pt[0][1]) for pt in cnt]) 96 | xcoords, ycoords = curvature.local_maxima_of_curvature(ret) 97 | fin_contours += zip(xcoords, ycoords) 98 | 99 | return fin_contours 100 | 101 | 102 | def dump_keypoints(img, filename): 103 | keypoints = [{'x': p[0], 'y': p[1]} for p in compute_keypoints(img)] 104 | output = {'output': {'keypoints': keypoints}} 105 | 106 | with open(filename, 'w+') as f: 107 | json.dump(output, f) 108 | 109 | 110 | def main(): 111 | if len(sys.argv) < 3: 112 | print("you need to pass in an image path!!!! 
"""
Usage: main.py lookup ...
       main.py insert ...
"""
import sys
import multiprocessing
from collections import Counter
from os import cpu_count

import cv2
import redis
import numpy as np

from .keypoints import compute_keypoints
from .phash import triangles_from_keypoints, hash_triangles


def phash_triangles(img, triangles, batch_size=None):
    """Hash every triangle of `img` in parallel; return a flat list of hashes."""
    n = len(triangles)

    if batch_size is None:
        # max(1, ...): with fewer triangles than CPUs the old
        # `n // cpu_count()` was 0 and range() raised on a zero step.
        batch_size = max(1, n // cpu_count())

    array = np.asarray(triangles, dtype='d')
    tasks = [(img, array[i:i + batch_size]) for i in range(0, n, batch_size)]
    results = []

    with multiprocessing.Pool(processes=cpu_count()) as p:
        for result in p.starmap(hash_triangles, tasks):
            results += result

    return results


def pipeline(r, data, chunk_size):
    """Yield (redis pipeline, chunk) pairs covering `data` in chunks.

    The same pipeline object is reused; callers must execute() it
    between chunks.  `chunk_size` must be an int.
    """
    npartitions = len(data) // chunk_size
    pipe = r.pipeline()

    for chunk in np.array_split(data, npartitions or 1):
        yield pipe, chunk


def insert(chunks, filename):
    """Add `filename` to the redis set stored under every hash key."""
    n = 0

    for pipe, keys in chunks:
        for key in keys:
            pipe.sadd(key, filename)

        n += sum(pipe.execute())

    print(f'added {n} fragments for {filename}')


def lookup(chunks, filename):
    """Count how often each stored file shares a hash with `filename`."""
    count = Counter()

    for pipe, keys in chunks:
        for key in keys:
            pipe.smembers(key)

        for result in pipe.execute():
            count.update(result)

    print(f'matches for {filename}:')

    for key, num in count.most_common():
        print(f'{num:<10d} {key.decode("utf-8")}')


def main():
    if len(sys.argv) < 3:
        print(__doc__)
        exit(1)

    command, *filenames = sys.argv[1:]
    command = insert if command == 'insert' else lookup

    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    try:
        # Must actually CALL ping(); a bare `r.ping` only references the
        # bound method and never contacts the server, so the connection
        # check silently did nothing.
        r.ping()
    except redis.ConnectionError:
        print('You need to install redis.')
        return

    for filename in filenames:
        print('loading', filename)
        img = cv2.imread(filename)

        keypoints = compute_keypoints(img)
        triangles = triangles_from_keypoints(keypoints, lower=50, upper=400)
        hashes = phash_triangles(img, triangles)
        # chunk_size must be an int: `len(data) // 1e5` yields a float and
        # np.array_split rejects a non-integer number of sections.
        chunks = pipeline(r, hashes, chunk_size=100000)

        print()
        command(chunks, filename)


if __name__ == '__main__':
    main()
from sklearn.neighbors import BallTree
import cv2
import numpy as np
import tqdm


# Lookup tables used to turn a boolean hash array into a hex string.
HEX_STRINGS = np.array([f'{x:02x}' for x in range(256)])
BIN_POWERS = 2 ** np.arange(8)


def phash(image, hash_size=8, highfreq_factor=4):
    """Return the perceptual hash of `image` as a (hash_size, hash_size) bool array."""
    img_size = hash_size * highfreq_factor
    image = cv2.resize(image, (img_size, img_size))
    # A BGRA image is a 3-D array with 4 channels, so the previous test
    # `image.ndim == 4` could never be true and alpha images always hit
    # the BGR branch; check the channel count instead.
    has_alpha = image.ndim == 3 and image.shape[2] == 4
    image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY if has_alpha else cv2.COLOR_BGR2GRAY)

    dct = cv2.dct(image.astype(float))
    dctlowfreq = dct[:hash_size, :hash_size]
    dctlowfreq[0, 0] = 0  # drop the DC term before thresholding

    return dctlowfreq > np.mean(dctlowfreq)


def hash_to_hex(a):
    """Convert an (m, 8, 8) boolean hash array into m 16-character hex strings."""
    index = np.sum(a * BIN_POWERS, axis=2)
    return [''.join(x) for x in HEX_STRINGS[index]]


def hash_triangles(img, triangles):
    """Return a perceptual hash (hex string) for every oriented triangle fragment.

    Each triangle is warped to a canonical target triangle three times
    (once per edge rotation), so 3 * len(triangles) hashes are returned.
    """
    n = len(triangles)
    triangles = np.asarray(triangles)

    # basically the return value
    hash_size = 8
    # = hash_size * highfreq_factor (4), matching phash() above
    hash_img_size = 32, 32
    low_freq_dct = np.empty((3, n, hash_size, hash_size))

    # size of the target image for the affine transform
    size = width, height = int(60 * 0.86), 60

    # Helper matrices: n stacked 3x3 identities.
    empty_n_identity33 = np.empty((n, 3, 3))
    empty_n_identity33[:, :] = np.identity(3)

    target_points = empty_n_identity33.copy()
    target_points[:, :2, 0] = width / 2, height
    target_points[:, :2, 1] = width, 0

    input_points = empty_n_identity33.copy()
    # Deliberately aliases the identity stack (no copy); it is only
    # written to below, after the two copies above were taken.
    transpose_m = empty_n_identity33

    # Rotate triangles 3 times, once for each edge of the triangle.
    rotations = (0, 1, 2), (1, 2, 0), (2, 0, 1)

    for i, rotation in enumerate(tqdm.tqdm(rotations)):
        p = triangles[:, rotation, :]

        p0 = p[:, 0]
        p1 = p[:, 1] - p0
        p2 = p[:, 2] - p0

        # If p1 is to the right of p2, swap so orientation is canonical.
        # (Fancy indexing copies the right-hand side, so the swap is safe.)
        swap = np.cross(p1, p2 - p1) > 0
        p1[swap], p2[swap] = p2[swap], p1[swap]

        # Build the affine transform mapping each input triangle onto the
        # canonical target triangle.
        transpose_m[:, :2, 2] = -p0
        input_points[:, :2, 0] = p1
        input_points[:, :2, 1] = p2

        input_points_inverse = np.linalg.inv(input_points)
        transform = target_points @ input_points_inverse @ transpose_m
        transform = transform[:, :2, :]

        for k in tqdm.tqdm(range(n)):
            image = cv2.warpAffine(img, transform[k], size)

            # Low-frequency DCT block for the perceptual hash.
            image = cv2.resize(image, hash_img_size)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            dct = cv2.dct(image.astype(float))
            low_freq_dct[i, k] = dct[:hash_size, :hash_size]

    # Threshold every DCT block against its own mean (DC term zeroed first).
    low_freq_dct = low_freq_dct.reshape(3 * n, hash_size, hash_size)
    low_freq_dct[:, 0, 0] = 0
    mean = np.mean(low_freq_dct, axis=(1, 2))
    hashes = low_freq_dct > mean[:, None, None]

    return hash_to_hex(hashes)


def triangles_from_keypoints(keypoints, lower=50, upper=400):
    """Return all (center, keypoint, point) triangles whose side lengths lie
    in (lower, upper] and whose area is large enough to hash reliably."""
    keypoints = np.asarray(keypoints, dtype=float)

    tree = BallTree(keypoints, leaf_size=10)
    i_lower = tree.query_radius(keypoints, r=lower)
    i_upper = tree.query_radius(keypoints, r=upper)
    # Neighbours of each keypoint at a distance in (lower, upper].
    in_range = [set(u) - set(l) for l, u in zip(i_lower, i_upper)]

    seen = set()
    result = []

    for i, center in enumerate(keypoints):
        seen.add(i)

        in_range_of_center = in_range[i] - seen
        if not in_range_of_center:
            continue

        processed = set()

        for j in in_range_of_center:
            if j < i + 1:
                continue

            # `-` binds tighter than `&`; parenthesised for clarity
            # (same evaluation order as before).
            points_idx = in_range[j] & (in_range_of_center - processed)
            if not points_idx:
                continue

            keypoint = keypoints[j]
            points = keypoints[list(points_idx)]
            # |cross| / 2 is the triangle area; filters near-degenerate
            # (thin) triangles that hash poorly.
            area = np.absolute(np.cross(points - center, points - keypoint)) / 2
            result += [(center, keypoint, p) for p in points[area > 1300]]

            processed.add(j)

    return result