├── .gitignore
├── LICENSE.txt
├── README.rdoc
├── ebin
│   └── erlmongo.app
├── logo
│   ├── erlmongo-logomark.png
│   ├── erlmongo-logotype-horizontal.png
│   └── erlmongo-logotype-vertical.png
├── rebar.config
└── src
    ├── bson.erl
    ├── erlmongo.app.src
    ├── erlmongo.hrl
    ├── erlmongo_app.erl
    ├── mongoapi.erl
    ├── mongodb.erl
    └── mongodb_supervisor.erl

/.gitignore:
--------------------------------------------------------------------------------
1 | *.beam
2 |
3 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE.
You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
204 |
--------------------------------------------------------------------------------
/README.rdoc:
--------------------------------------------------------------------------------
1 | https://github.com/Tobaloidee/erlmongo/blob/master/logo/erlmongo-logotype-horizontal.png
2 | = Info
3 |
4 | Erlmongo is a fairly complete Erlang driver for MongoDB.
5 |
6 | All save/update/insert calls are safe and return whether the write succeeded.
7 |
8 | It supports maps and proplists as datatypes.
Strings can be lists or binaries, but strings received from mongodb (as a result of find) will be binaries.
9 |
10 | Connections are pools (default size 10) to the master:
11 | - master/slave - read and write from master
12 | - master/master - pick a master at random and use it for everything
13 | - replica pairs/sets - find out which is master and connect to it
14 |
15 | Always use an atom for naming pools. The name will be used for a public named ets table.
16 | Runtime connection API:
17 | mongodb:singleServer/1,2
18 | mongodb:replicaPairs/3
19 | mongodb:replicaSets/2
20 | mongodb:masterSlave/3
21 | mongodb:connect/1
22 | mongodb:sharded/2
23 |
24 | = Replica sets
25 | % The list of servers does not have to be the entire replica set.
26 | % Erlmongo will read the primary server from them and connect to it (even if not in the list).
27 | mongodb:replicaSets(mypool,["127.0.0.1:30000","127.0.0.1:30001"]).
28 | mongodb:connect(mypool).
29 |
30 | = Examples
31 | make
32 | erl
33 | application:start(erlmongo).
34 | % Set mongodb server info. singleServer(PoolName) is the same as singleServer(PoolName,10,"localhost:27017")
35 | % NOTICE: Erlang 21 disabled tuple calls. You must call with mongoapi and last parameter Mong, e.g. mongoapi:count("user", Mong).
36 | mongodb:singleServer(def).
37 | mongodb:connect(def).
38 | % Create an interface for the test database (the name has to be a binary)
39 | Mong = mongoapi:new(def,<<"test">>).
40 |
41 | % Save a new document
42 | Mong:save("mycollection",#{name => "MyDocument", i => 10}).
43 | % Return the document in map form
44 | Mong:findOne("mycollection", #{i => 10}, map).
45 |
46 | % With proplists
47 | Mong:save("mydoc", [{"name", "MyDocument"}, {"i", 10}]).
48 | % Return only the _id and name fields
49 | Mong:findOne("mydoc", [{"i", 10}], [{"name", 1}]).
50 |
51 | % Set an index. The first parameter is so that the driver knows which collection
52 | % we mean. If you have an already constructed record laying around, use that.
53 | % No need to construct a new record just so the driver can read the name.
54 | % The second parameter is the index we wish to create. 1 = ascending, -1 = descending.
55 | Mong:ensureIndex("mycollection", [{#mydoc.i, 1}, {#mydoc.name, -1}]).
56 |
57 | % Find examples:
58 |
59 | % Or
60 | Mong:find("mycollection",[{'or',[{"a",1},{"i",11}]}],undefined,0,100).
61 |
62 | % Parameters: search criteria, field selector, docs to skip, docs to return
63 | Mong:find("mycollection",#{i => 4}, #{name => 1}, 0, 10).
64 |
65 | % Find with options
66 | Mong:findOpt("mycollection", #{i => 4}, undefined, [explain], 0, 0).
67 |
68 | % Embedded documents
69 | Mong:save("mycollection",#{name => "zembedom", i => 10, address => #{city => "ny", street => "some", country => "us"}}).
70 | Mong:find("mycollection",#{address => #{city => "la"}}, undefined, 0, 0).
71 |
72 | % Advanced queries (supported: gt, lt, gte, lte, ne, in, nin, all, size, exists):
73 | % Documents with even i
74 | Mong:find("mycollection",#{i => {mod, 2, 0}}, undefined, 0,0).
75 | % Documents with i larger than 2:
76 | Mong:find("mycollection", #{i => {gt, 2}}, undefined, 0,0).
77 | % Documents with i between 2 and 5:
78 | Mong:find("mycollection", #{i => {in, {gt, 2}, {lt, 5}}}, undefined, 0,0).
79 | % in example:
80 | Mong:find("mycollection", #{tags => {in, [2,3,4]}}, undefined, 0,0).
81 | % exists example:
82 | Mong:find("mycollection", #{tags => {exists, false}}, undefined, 0,0).
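% Regex example (a sketch, not from the original README; the {regex, Expression, Flags}
% tuple is handled by bson:encode_element/1 and the flag letters are listed in mongoapi.erl):
Mong:find("mycollection", #{name => {regex, <<".*\\.flv">>, <<"i">>}}, undefined, 0, 0).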
83 |
84 | % findandmodify command
85 | Mong:runCmd([{"findandmodify", "collectionname"},{"query", [{"fn","onmeta.flv"},{"ch","somechan"}]},{"remove",1}]).
86 |
87 | % GridFS
88 | % Always run this on the collection before writing the first file
89 | Mong:gfsIndexes().
90 | {ok, Bin} = file:read_file("SomeFile").
91 | % To open a file for writing, use gfsNew
92 | PID = Mong:gfsNew("myfile").
93 | % You can set parameters: mime, meta (embedded document), aliases (array of names), chunk size (default 256k)
94 | % flushLimit (buffer size at which data gets flushed to mongodb, def. 1MB)
95 | % PID = Mong:gfsNew("myfile", [{chunkSize, 100}]).
96 | % You can also set the collection name (default is fd)
97 | % PID = Mong:gfsNew("myfilecol", "myfile", []).
98 | Mong:gfsWrite(PID,Bin).
99 | Mong:gfsClose(PID).
100 | % Reading
101 | PID = Mong:gfsOpen(#gfs_file{filename = "myfile"}).
102 | Res = Mong:gfsRead(PID,100000).
103 | Mong:gfsClose(PID).
104 |
105 | = Supported data types and modifiers
106 | Look at bson:encode_element/1
107 |
108 | = SSL support
109 | Use application environment variables or mongodb:set_ssl to establish connections over SSL before you call mongodb:connect.
110 | mongodb:set_ssl(true). % for ssl options use mongodb:set_ssl(true, SslOpts).
111 |
112 |
113 | = Switching to mochijson's style proplists
114 | The mochijson library has a different string/list encoding convention:
115 | string = binary
116 | array = list
117 | You can choose a preferred way to encode with mongoapi:set_encode_style/1.
118 | The selected style is stored per pool and database, not per mongoapi instance.
119 | An example:
120 | ...
121 | 12> Mong:set_encode_style(default).
122 | ok
123 | 13> Mong:save("foobar", [{<<"data">>, [[1.1, 2.2], [3.3, 4.4]]}]).
124 | ** exception error: bad argument
125 | in function unicode:characters_to_binary/1
126 | called as unicode:characters_to_binary([[1.1,2.2],[3.3,4.4]])
127 | in call from mongodb:encode_cstring/1
128 | in call from mongodb:encode_element/1
129 | in call from mongodb:'-encode/2-fun-0-'/3
130 | in call from lists:foldl/3
131 | in call from mongodb:encode/2
132 | in call from mongoapi:save/3
133 | 14> Mong:set_encode_style(mochijson).
134 | ok
135 | 15> Mong:save("foobar", [{<<"data">>, [[1.1, 2.2], [3.3, 4.4]]}]).
136 | {oid,<<"000af08b902dee723e000006">>}
137 |
138 |
139 | == Supported operation list
140 | Collections
141 | * remove
142 | * save
143 | * insert
144 | * update
145 | * batchInsert
146 | * ensureIndex
147 | * deleteIndex
148 | * deleteIndexes
149 | * count
150 | * dropCollection
151 | * createCollection
152 | * group
153 |
154 | Search
155 | * find
156 | * findOpt
157 | * cursor - getMore - closeCursor
158 | * findOne
159 |
160 | DB
161 | * eval
162 | * stats
163 | * runCmd
164 | * repairDatabase
165 | * cloneDatabase
166 | * dropDatabase
167 | * addUser
168 | * setProfilingLevel
169 | * getProfilingLevel
170 |
171 | GridFS
172 | * gfsIndexes
173 | * gfsNew
174 | * gfsWrite
175 | * gfsOpen
176 | * gfsRead
177 | * gfsDelete
178 | * gfsFlush
179 | * gfsClose
180 |
181 | == Author
182 | Sergej Jurečko
--------------------------------------------------------------------------------
/ebin/erlmongo.app:
--------------------------------------------------------------------------------
1 | {application,erlmongo,
2 | [{description,"Erlang driver for mongodb"},
3 | {vsn,"0.2"},
4 | {modules,[bson,erlmongo_app,mongoapi,mongodb,
5 | mongodb_supervisor]},
6 | {registered,[mongodb,mongodb_supervisor]},
7 | {applications,[kernel,stdlib]},
8 | {mod,{erlmongo_app,[]}},
9 | {start_phases,[]}]}.
10 |
--------------------------------------------------------------------------------
/logo/erlmongo-logomark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SergejJurecko/erlmongo/f0d03cd4592f7bf28059b81214b61c28ccf046c0/logo/erlmongo-logomark.png
--------------------------------------------------------------------------------
/logo/erlmongo-logotype-horizontal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SergejJurecko/erlmongo/f0d03cd4592f7bf28059b81214b61c28ccf046c0/logo/erlmongo-logotype-horizontal.png
--------------------------------------------------------------------------------
/logo/erlmongo-logotype-vertical.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SergejJurecko/erlmongo/f0d03cd4592f7bf28059b81214b61c28ccf046c0/logo/erlmongo-logotype-vertical.png
--------------------------------------------------------------------------------
/rebar.config:
--------------------------------------------------------------------------------
1 | {erl_opts, [{src_dirs, ["src"]}]}.
2 |
3 | {cover_enabled, true}.
4 |
5 | {lib_dirs, []}.
6 |
7 |
8 |
9 | {deps, []}.
--------------------------------------------------------------------------------
/src/bson.erl:
--------------------------------------------------------------------------------
1 | -module(bson).
2 | -export([encode/1, decode/1, encode/2, decode/2, dec2hex/2, hex2dec/2]).
3 | -export([recfields/1, encoderec/1, encode_findrec/1, encoderec_selector/2,
4 | decoderec/2, gen_keyname/2, gen_prop_keyname/2, recoffset/1]).
5 | -include_lib("erlmongo.hrl").
6 |
7 | encode(undefined) ->
8 |     <<>>;
9 | encode(<<>>) ->
10 |     <<>>;
11 | encode(Items) ->
12 |     encode(Items, default).
13 |
14 | encode(#{} = Items, S) ->
15 |     encode(maps:to_list(Items), S);
16 | encode(Items, Style) ->
17 |     Bin = lists:foldl(fun(Item, B) -> <<B/binary, (encode_element(Item, Style))/binary>> end, <<>>, Items),
18 |     <<(byte_size(Bin)+5):32/little-signed, Bin/binary, 0:8>>.
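% Framing sketch (hand-checked against encode/2 above, not output from the repo):
% 1> bson:encode([{<<"a">>, 1}]).
% <<12,0,0,0,16,97,0,1,0,0,0,0>>
% The leading int32 (12) counts itself, the element bytes and the trailing 0; the
% element is type 16 (int32), the cstring name "a", then the little-endian value.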
19 |
20 | % mochijson behaviour
21 | encode_element({Name, [{_,_}|_] = Items}, mochijson) ->
22 |     Binary = encode(Items, mochijson),
23 |     <<3, Name/binary, 0, Binary/binary>>;
24 |
25 | encode_element({Name, Items}, mochijson) when is_list(Items) ->
26 |     <<4, Name/binary, 0, (encarray([], Items, 0, mochijson))/binary>>;
27 |
28 | encode_element(A, _Style) ->
29 |     encode_element(A). %fallback
30 |
31 | % default behaviour
32 | encode_element({[_|_] = Name, Val}) ->
33 |     encode_element({list_to_binary(Name),Val});
34 | encode_element({'or',[{_,_}|_] = L}) ->
35 |     encode_element({<<"$or">>,{array,[[Obj] || Obj <- L]}});
36 | encode_element({Name, Val}) when is_atom(Name) ->
37 |     encode_element({atom_to_binary(Name, utf8),Val});
38 | encode_element({<<_/binary>> = Name, #{} = Items}) ->
39 |     Binary = encode(maps:to_list(Items)),
40 |     <<3, Name/binary, 0, Binary/binary>>;
41 | encode_element({<<_/binary>> = Name, [O|_] = Items}) when is_tuple(O) ->
42 |     Binary = encode(Items),
43 |     <<3, Name/binary, 0, Binary/binary>>;
44 | encode_element({Name, []}) ->
45 |     <<2, Name/binary, 0, 1:32/little-signed, 0>>;
46 | encode_element({Name, [M|_] = Items}) when is_map(M) ->
47 |     <<4, Name/binary, 0, (encarray([], Items, 0))/binary>>;
48 | encode_element({<<_/binary>> = Name, [_|_] = Value}) ->
49 |     ValueEncoded = encode_cstring(Value),
50 |     <<2, Name/binary, 0, (byte_size(ValueEncoded)):32/little-signed, ValueEncoded/binary>>;
51 | encode_element({Name, <<_/binary>> = Value}) ->
52 |     ValueEncoded = encode_cstring(Value),
53 |     <<2, Name/binary, 0, (byte_size(ValueEncoded)):32/little-signed, ValueEncoded/binary>>;
54 | encode_element({Name, true}) ->
55 |     <<8, Name/binary, 0, 1:8>>;
56 | encode_element({Name, false}) ->
57 |     <<8, Name/binary, 0, 0:8>>;
58 | encode_element({Name, null}) ->
59 |     <<10, Name/binary, 0>>;
60 | encode_element({Name,min}) ->
61 |     <<255, Name/binary, 0>>;
62 | encode_element({Name,max}) ->
63 |     <<127, Name/binary, 0>>;
64 | encode_element({Name, Value}) when is_atom(Value) ->
65 |     ValueEncoded = encode_cstring(atom_to_binary(Value,utf8)),
66 |     <<2, Name/binary, 0, (byte_size(ValueEncoded)):32/little-signed, ValueEncoded/binary>>;
67 | encode_element({Name, Value}) when is_integer(Value) ->
68 |     case true of
69 |         _ when Value >= 2147483648; Value =< -2147483648 ->
70 |             <<18, Name/binary, 0, Value:64/little-signed>>;
71 |         _ ->
72 |             <<16, Name/binary, 0, Value:32/little-signed>>
73 |     end;
74 | encode_element({plaintext, Name, Val}) -> % exists for performance reasons.
75 |     <<2, Name/binary, 0, (byte_size(Val)+1):32/little-signed, Val/binary, 0>>;
76 | encode_element({Name, {oid, OID}}) ->
77 |     <<7, Name/binary, 0, (hex2dec(<<>>, OID))/binary>>;
78 | % list of lists = array
79 | encode_element({Name, {array, Items}}) ->
80 |     <<4, Name/binary, 0, (encarray([], Items, 0))/binary>>;
81 | encode_element({Name, {bson, Bin}}) ->
82 |     <<3, Name/binary, 0, Bin/binary>>;
83 | encode_element({Name, {binary, 2, Data}}) ->
84 |     <<5, Name/binary, 0, (byte_size(Data)+4):32/little-signed, 2:8, (byte_size(Data)):32/little-signed, Data/binary>>;
85 | encode_element({Name,{struct,Items}}) ->
86 |     Binary = encode(Items),
87 |     <<3, Name/binary, 0, Binary/binary>>;
88 | encode_element({Name, {inc, Val}}) ->
89 |     encode_element({<<"$inc">>, [{Name, Val}]});
90 | encode_element({Name, {set, Val}}) ->
91 |     encode_element({<<"$set">>, [{Name, Val}]});
92 | encode_element({Name, {unset, Val}}) ->
93 |     encode_element({<<"$unset">>, [{Name, Val}]});
94 | encode_element({Name, {push, Val}}) ->
95 |     encode_element({<<"$push">>, [{Name, Val}]});
96 | encode_element({Name, {pushAll, Val}}) ->
97 |     encode_element({<<"$pushAll">>, [{Name, {array, Val}}]});
98 | encode_element({Name, {pop, Val}}) ->
99 |     encode_element({<<"$pop">>, [{Name, Val}]});
100 | encode_element({Name, {pull, Val}}) ->
101 |     encode_element({<<"$pull">>, [{Name, Val}]});
102 | encode_element({Name, {pullAll, Val}}) ->
103 |     encode_element({<<"$pullAll">>, [{Name, {array, Val}}]});
104 | encode_element({Name, {addToSet, {array,Val}}}) ->
105 |     encode_element({<<"$addToSet">>, [{Name, [{<<"$each">>, {array, Val}}]}]});
106 | encode_element({Name, {addToSet, Val}}) ->
107 |     encode_element({<<"$addToSet">>, [{Name, Val}]});
108 | encode_element({Name, {gt, Val}}) ->
109 |     encode_element({Name, [{<<"$gt">>, Val}]});
110 | encode_element({Name, {lt, Val}}) ->
111 |     encode_element({Name, [{<<"$lt">>, Val}]});
112 | encode_element({Name, {lte, Val}}) ->
113 |     encode_element({Name, [{<<"$lte">>, Val}]});
114 | encode_element({Name, {gte, Val}}) ->
115 |     encode_element({Name, [{<<"$gte">>, Val}]});
116 | encode_element({Name, {ne, Val}}) ->
117 |     encode_element({Name, [{<<"$ne">>, Val}]});
118 | encode_element({Name, {in, {FE,FV},{TE,TV}}}) ->
119 |     encode_element({Name, [{<<"$", (atom_to_binary(FE,latin1))/binary>>, FV},
120 |                            {<<"$", (atom_to_binary(TE,latin1))/binary>>, TV}]});
121 | encode_element({Name, {in, Val}}) ->
122 |     encode_element({Name, [{<<"$in">>, {array, Val}}]});
123 | encode_element({Name, {nin, Val}}) ->
124 |     encode_element({Name, [{<<"$nin">>, {array, Val}}]});
125 | encode_element({Name, {mod, By,Rem}}) ->
126 |     encode_element({Name, [{<<"$mod">>, {array, [By,Rem]}}]});
127 | encode_element({Name, {all, Val}}) ->
128 |     encode_element({Name, [{<<"$all">>, {array, Val}}]});
129 | encode_element({Name, {size, Val}}) ->
130 |     encode_element({Name, [{<<"$size">>, Val}]});
131 | encode_element({Name, {'not', Val}}) ->
132 |     encode_element({Name, [{<<"$not">>, Val}]});
133 | encode_element({Name, {exists, Val}}) ->
134 |     encode_element({Name, [{<<"$exists">>, Val}]});
135 | encode_element({Name, {binary, Data}}) ->
136 |     encode_element({Name, {binary,0, Data}});
137 | encode_element({Name, {binary, SubType, Data}}) ->
138 |     StringEncoded = encode_cstring(Name),
139 |     <<5, StringEncoded/binary, (byte_size(Data)):32/little-signed, SubType:8, Data/binary>>;
140 | encode_element({Name, Value}) when is_float(Value) ->
141 |     <<1, (Name)/binary, 0, Value:64/little-signed-float>>;
142 | encode_element({Name, {obj, []}}) ->
143 |     <<3, Name/binary, 0, (encode([]))/binary>>;
144 | encode_element({Name, {MegaSecs, Secs, MicroSecs}}) when is_integer(MegaSecs),is_integer(Secs),is_integer(MicroSecs) ->
145 |     Unix = MegaSecs * 1000000 + Secs,
146 |     Millis = Unix * 1000 + (MicroSecs div 1000),
147 |     <<9, Name/binary, 0, Millis:64/little-signed>>;
148 | encode_element({Name, {{Yr,Mo,Dy},{Hr,Mi,Sd}}=Value})
149 |     when is_integer(Yr),is_integer(Mo),is_integer(Dy), is_integer(Hr),is_integer(Mi),is_integer(Sd) ->
150 |     %% 62167219200 == calendar:datetime_to_gregorian_seconds({{1970,1,1},{0,0,0}})
151 |     Millis = (calendar:datetime_to_gregorian_seconds(Value)-62167219200) * 1000,
152 |     <<9, Name/binary, 0, Millis:64/little-signed>>;
153 | encode_element({Name, {regex, Expression, Flags}}) ->
154 |     ExpressionEncoded = encode_cstring(Expression),
155 |     FlagsEncoded = encode_cstring(Flags),
156 |     <<11, Name/binary, 0, ExpressionEncoded/binary, FlagsEncoded/binary>>;
157 | encode_element({Name, {ref, Collection, <<First:8/binary, Second:4/binary>>}}) ->
158 |     CollectionEncoded = encode_cstring(Collection),
159 |     FirstReversed = lists:reverse(binary_to_list(First)),
160 |     SecondReversed = lists:reverse(binary_to_list(Second)),
161 |     OID = list_to_binary(lists:append(FirstReversed, SecondReversed)),
162 |     <<12, Name/binary, 0, (byte_size(CollectionEncoded)):32/little-signed, CollectionEncoded/binary, OID/binary>>;
163 | encode_element({Name, {code, Code}}) ->
164 |     CodeEncoded = encode_cstring(Code),
165 |     <<13, Name/binary, 0, (byte_size(CodeEncoded)):32/little-signed, CodeEncoded/binary>>;
166 | encode_element({Name,{bignum,Value}}) ->
167 |     <<18, Name/binary, 0, Value:64/little-signed>>;
168 | % code with scope
169 | encode_element({Name, {code, C, S}}) ->
170 |     Code = encode_cstring(C),
171 |     Scope = encode(S),
172 |     <<15, Name/binary, 0, (8+byte_size(Code)+byte_size(Scope)):32/little, (byte_size(Code)):32/little, Code/binary, Scope/binary>>.
173 |
174 |
175 | encarray(L, Lst, N) -> %fallback
176 |     encarray(L, Lst, N, default).
177 |
178 | encarray(L, [H|T], N, Style) ->
179 |     encarray([{list_to_binary(integer_to_list(N)), H}|L], T, N+1, Style);
180 | encarray(L, [], _, Style) ->
181 |     encode(lists:reverse(L), Style).
182 |
183 | encode_cstring(String) ->
184 |     <<(unicode:characters_to_binary(String))/binary, 0:8>>.
185 |
186 | %% Size has to be greater than 4
187 | % decode(<<Size:32/little-signed, Rest/binary>> = Binary) when byte_size(Binary) >= Size, Size > 4 ->
188 | %     decode(Rest, Size-4);
189 | %
190 | % decode(_BadLength) ->
191 | %     throw({invalid_length}).
192 | %
193 | % decode(Binary, _Size) ->
194 | %     case decode_next(Binary, []) of
195 | %         {BSON, <<>>} ->
196 | %             [BSON];
197 | %         {BSON, Rest} ->
198 | %             [BSON | decode(Rest)]
199 | %     end.
200 | decode(Bin) ->
201 |     decode(proplist,Bin).
202 | decode(Format,Bin) ->
203 |     % io:format("Decoding ~p~n", [Bin]),
204 |     decode(Format,Bin,[]).
205 | decode(Format,<<_Size:32, Bin/binary>>, L) ->
206 |     {BSON, Rem} = decode_next(Format,Bin, []),
207 |     decode(Format, Rem,[BSON|L]);
208 | decode(_Format,<<>>, L) ->
209 |     lists:reverse(L).
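% Round-trip sketch (hand-checked against encode/1 and decode/2, not repo output):
% 1> bson:decode(map, bson:encode(#{<<"a">> => 1})).
% [#{<<"a">> => 1}]
% decode/2 always returns a list of documents, one per concatenated BSON document.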
210 |
211 | decode_next(proplist,<<>>, Accum) ->
212 |     {lists:reverse(Accum), <<>>};
213 | decode_next(map,<<>>, Accum) ->
214 |     {maps:from_list(Accum), <<>>};
215 | decode_next(proplist,<<0:8, Rest/binary>>, Accum) ->
216 |     {lists:reverse(Accum), Rest};
217 | decode_next(map,<<0:8, Rest/binary>>, Accum) ->
218 |     {maps:from_list(Accum), Rest};
219 | decode_next(Format,<<Type:8, Rest/binary>>, Accum) ->
220 |     {Name, EncodedValue} = decode_cstring(Rest, <<>>),
221 |     % io:format("Decoding ~p~n", [Type]),
222 |     {Value, Next} = decode_value(Format,Type, EncodedValue),
223 |     decode_next(Format,Next, [{Name, Value}|Accum]).
224 |
225 | decode_cstring(<<>>, _) ->
226 |     throw({invalid_cstring});
227 | decode_cstring(<<0:8, Rest/binary>>, Acc) ->
228 |     {Acc, Rest};
229 | decode_cstring(<<C:8, Rest/binary>>, Acc) ->
230 |     decode_cstring(Rest, <<Acc/binary, C:8>>).
231 |
232 | decode_value(_F,7, <<OID:12/binary, Rest/binary>>) ->
233 |     {{oid, dec2hex(<<>>, OID)}, Rest};
234 | decode_value(_F,16, <<Integer:32/little-signed, Rest/binary>>) ->
235 |     {Integer, Rest};
236 | decode_value(_F,18, <<Integer:64/little-signed, Rest/binary>>) ->
237 |     {Integer, Rest};
238 | decode_value(_F,1, <<Double:64/little-signed-float, Rest/binary>>) ->
239 |     {Double, Rest};
240 | decode_value(_F,2, <<Size:32/little-signed, Rest/binary>>) ->
241 |     StringSize = Size-1,
242 |     case Rest of
243 |         <<String:StringSize/binary, 0:8, Remain/binary>> ->
244 |             {String, Remain};
245 |         <<String:StringSize/binary, Remain/binary>> ->
246 |             {String,Remain}
247 |     end;
248 | decode_value(Format,3, <<Size:32/little-signed, Rest/binary>> = Binary) when byte_size(Binary) >= Size ->
249 |     decode_next(Format,Rest, []);
250 | decode_value(Format,4, <<Size:32/little-signed, Data/binary>> = Binary) when byte_size(Binary) >= Size ->
251 |     {Array, Rest} = decode_next(Format,Data, []),
252 |     case Format of
253 |         proplist ->
254 |             {{array,[Value || {_Key, Value} <- Array]}, Rest};
255 |         map ->
256 |             Fun = fun({A,_},{B,_}) -> binary_to_integer(A) =< binary_to_integer(B) end,
257 |             {[Value || {_Key, Value} <- lists:sort(Fun,maps:to_list(Array))], Rest}
258 |     end;
259 | decode_value(_F,5, <<_Size:32/little-signed, 2:8/little, BinSize:32/little-signed, BinData:BinSize/binary-little-unit:8, Rest/binary>>) ->
260 |     {{binary, 2, BinData}, Rest};
261 | decode_value(_F,5, <<Size:32/little-signed, SubType:8/little, BinData:Size/binary-little-unit:8, Rest/binary>>) ->
262 |     {{binary, SubType, BinData}, Rest};
263 | decode_value(_F,6, _Binary) ->
264 |     throw(encountered_undefined);
265 | decode_value(_F,8, <<0:8, Rest/binary>>) ->
266 |     {false, Rest};
267 | decode_value(_F,8, <<1:8, Rest/binary>>) ->
268 |     {true, Rest};
269 | decode_value(_F,9, <<Millis:64/little-signed, Rest/binary>>) ->
270 |     UnixTime = Millis div 1000,
271 |     MegaSecs = UnixTime div 1000000,
272 |     Secs = UnixTime - (MegaSecs * 1000000),
273 |     MicroSecs = (Millis - (UnixTime * 1000)) * 1000,
274 |     {{MegaSecs, Secs, MicroSecs}, Rest};
275 | decode_value(_F,10, Binary) ->
276 |     {null, Binary};
277 | decode_value(_F,11, Binary) ->
278 |     {Expression, RestWithFlags} = decode_cstring(Binary, <<>>),
279 |     {Flags, Rest} = decode_cstring(RestWithFlags, <<>>),
280 |     {{regex, Expression, Flags}, Rest};
281 | decode_value(_F,12, <<Size:32/little-signed, Data/binary>> = Binary) when size(Binary) >= Size ->
282 |     {NS, RestWithOID} = decode_cstring(Data, <<>>),
283 |     {{oid, OID}, Rest} = decode_value(_F,7, RestWithOID),
284 |     {{ref, NS, OID}, Rest};
285 | decode_value(_F,13, <<_Size:32/little-signed, Data/binary>>) ->
286 |     {Code, Rest} = decode_cstring(Data, <<>>),
287 |     {{code, Code}, Rest};
288 | decode_value(_F,14, <<_Size:32/little-signed, Data/binary>>) ->
289 |     {Code, Rest} = decode_cstring(Data, <<>>),
290 |     {{symbol, Code}, Rest};
291 | decode_value(_F,15, <<ComplSize:32/little-signed, StrBSize:32/little-signed, Rem/binary>>) ->
292 |     StrSize = StrBSize - 1,
293 |     ScopeSize = ComplSize - 8 - StrBSize,
294 |     <<Code:StrSize/binary, 0:8, Scope:ScopeSize/binary, Rest/binary>> = Rem,
295 |     {{code,Code,decode(_F,Scope)}, Rest};
296 | decode_value(_F,17, <<Integer:64/little-signed, Rest/binary>>) ->
297 |     {Integer, Rest};
298 | decode_value(_F,255, Rest) ->
299 |     {min, Rest};
300 | decode_value(_F,127, Rest) ->
301 |     {max, Rest};
302 | decode_value(_F,18, <<Integer:64/little-signed, Rest/binary>>) ->
303 |     {Integer, Rest}.
304 |
305 | dec2hex(N, <<I:8, Rem/binary>>) ->
306 |     dec2hex(<<N/binary, (hex0(I bsr 4)):8, (hex0(I band 15)):8>>, Rem);
307 | dec2hex(N,<<>>) ->
308 |     N.
309 |
310 | hex2dec(N,{oid, Bin}) ->
311 |     hex2dec(N, Bin);
312 | hex2dec(N,<<A:8, B:8, Rem/binary>>) ->
313 |     hex2dec(<<N/binary, ((dec0(A) bsl 4) bor dec0(B)):8>>, Rem);
314 | hex2dec(N,<<>>) ->
315 |     N.
316 |
317 | dec0($a) -> 10;
318 | dec0($b) -> 11;
319 | dec0($c) -> 12;
320 | dec0($d) -> 13;
321 | dec0($e) -> 14;
322 | dec0($f) -> 15;
323 | dec0(X) -> X - $0.
324 |
325 | hex0(10) -> $a;
326 | hex0(11) -> $b;
327 | hex0(12) -> $c;
328 | hex0(13) -> $d;
329 | hex0(14) -> $e;
330 | hex0(15) -> $f;
331 | hex0(I) -> $0 + I.
332 |
333 |
334 |
335 | recfields(Rec) ->
336 |     case true of
337 |         _ when is_tuple(Rec) ->
338 |             RecFields = get({recinfo, element(1,Rec)});
339 |         _ when is_atom(Rec) ->
340 |             RecFields = get({recinfo, Rec})
341 |     end,
342 |     case RecFields of
343 |         undefined ->
344 |             [_|Fields] = element(element(2, Rec), ?RECTABLE),
345 |             Fields;
346 |         [recindex|Fields] ->
347 |             Fields;
348 |         Fields ->
349 |             Fields
350 |     end.
351 | recoffset(Rec) ->
352 |     case true of
353 |         _ when is_tuple(Rec) ->
354 |             RecFields = get({recinfo, element(1,Rec)});
355 |         _ when is_atom(Rec) ->
356 |             RecFields = get({recinfo, Rec})
357 |     end,
358 |     case RecFields of
359 |         undefined ->
360 |             3;
361 |         [recindex|_] ->
362 |             3;
363 |         _ ->
364 |             2
365 |     end.
366 |
367 | encoderec(Rec) ->
368 |     % [_|Fields] = element(element(2, Rec), ?RECTABLE),
369 |     Fields = recfields(Rec),
370 |     % io:format("~p~n", [Fields]),
371 |     encoderec(<<>>, deep, Rec, Fields, recoffset(Rec), <<>>).
372 | encode_findrec(Rec) ->
373 |     % [_|Fields] = element(element(2, Rec), ?RECTABLE),
374 |     Fields = recfields(Rec),
375 |     encoderec(<<>>, flat, Rec, Fields, recoffset(Rec), <<>>).
376 |
377 | encoderec(NameRec, Type, Rec, [{FieldName, _RecIndex}|T], N, Bin) ->
378 |     case element(N, Rec) of
379 |         undefined ->
380 |             encoderec(NameRec, Type, Rec, T, N+1, Bin);
381 |         SubRec when Type == flat ->
382 |             % [_|SubFields] = element(element(2, SubRec), ?RECTABLE),
383 |             SubFields = recfields(SubRec),
384 |             case NameRec of
385 |                 <<>> ->
386 |                     Dom = atom_to_binary(FieldName, latin1);
387 |                 _ ->
388 |                     Dom = <<NameRec/binary, ".", (atom_to_binary(FieldName, latin1))/binary>>
389 |             end,
390 |             encoderec(NameRec, Type, Rec, T, N+1, <<Bin/binary, (encoderec(Dom, flat, SubRec, SubFields, recoffset(SubRec), <<>>))/binary>>);
391 |         Val ->
392 |             encoderec(NameRec, Type, Rec, T, N+1, <<Bin/binary, (encode_element({atom_to_binary(FieldName, latin1), {bson, encoderec(Val)}}))/binary>>)
393 |     end;
394 | encoderec(NameRec, Type, Rec, [FieldName|T], N, Bin) ->
395 |     case element(N, Rec) of
396 |         undefined ->
397 |             encoderec(NameRec, Type,Rec, T, N+1, Bin);
398 |         Val ->
399 |             case FieldName of
400 |                 docid ->
401 |                     case NameRec of
402 |                         <<>> ->
403 |                             Dom = <<"_id">>;
404 |                         _ ->
405 |                             Dom = <<NameRec/binary, "._id">>
406 |                     end,
407 |                     case Val of
408 |                         {oid, _} ->
409 |                             encoderec(NameRec, Type,Rec, T, N+1, <<Bin/binary, (encode_element({Dom, Val}))/binary>>);
410 |                         _ ->
411 |                             encoderec(NameRec, Type,Rec, T, N+1, <<Bin/binary, (encode_element({Dom, {oid, Val}}))/binary>>)
412 |                     end;
413 |                 _ ->
414 |                     case NameRec of
415 |                         <<>> ->
416 |                             Dom = atom_to_binary(FieldName, latin1);
417 |                         _ ->
418 |                             Dom = <<NameRec/binary, ".", (atom_to_binary(FieldName, latin1))/binary>>
419 |                     end,
420 |                     encoderec(NameRec, Type,Rec, T, N+1, <<Bin/binary, (encode_element({Dom, Val}))/binary>>)
421 |             end
422 |     end;
423 | encoderec(<<>>,_,_, [], _, Bin) ->
424 |     <<(byte_size(Bin)+5):32/little, Bin/binary, 0:8>>;
425 | encoderec(_,_,_, [], _, Bin) ->
426 |     Bin.
427 |
428 | encoderec_selector(_, undefined) ->
429 |     <<>>;
430 | encoderec_selector(_, <<>>) ->
431 |     <<>>;
432 | encoderec_selector(Rec, SelectorList) ->
433 |     % [_|Fields] = element(element(2, Rec), ?RECTABLE),
434 |     Fields = recfields(Rec),
435 |     encoderec_selector(SelectorList, Fields, recoffset(Rec), <<>>).
436 |
437 | % SelectorList is either a list of indexes in the record tuple, or a list of {TupleIndex, TupleVal}. Use the index to get the name
438 | % from the list of names.
439 | encoderec_selector([{FieldIndex, Val}|Fields], [FieldName|FieldNames], FieldIndex, Bin) ->
440 |     case FieldName of
441 |         docid ->
442 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({<<"_id">>, Val}))/binary>>);
443 |         {Name, _RecIndex} ->
444 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({atom_to_binary(Name, latin1), Val}))/binary>>);
445 |         _ ->
446 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({atom_to_binary(FieldName, latin1), Val}))/binary>>)
447 |     end;
448 | encoderec_selector([FieldIndex|Fields], [FieldName|FieldNames], FieldIndex, Bin) ->
449 |     case FieldName of
450 |         docid ->
451 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({<<"_id">>, 1}))/binary>>);
452 |         {Name, _RecIndex} ->
453 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({atom_to_binary(Name, latin1), 1}))/binary>>);
454 |         _ ->
455 |             encoderec_selector(Fields, FieldNames, FieldIndex+1, <<Bin/binary, (encode_element({atom_to_binary(FieldName, latin1), 1}))/binary>>)
456 |     end;
457 | encoderec_selector(Indexes, [_|Names], Index, Bin) ->
458 |     encoderec_selector(Indexes, Names, Index+1, Bin);
459 | encoderec_selector([], _, _, Bin) ->
460 |     <<(byte_size(Bin)+5):32/little, Bin/binary, 0:8>>.
461 |
462 | gen_prop_keyname([{[_|_] = KeyName, KeyVal}|T], Bin) ->
463 |     gen_prop_keyname([{list_to_binary(KeyName), KeyVal}|T], Bin);
464 | gen_prop_keyname([{KeyName, KeyVal}|T], Bin) ->
465 |     case is_integer(KeyVal) of
466 |         true when T == [] ->
467 |             Add = <<(list_to_binary(integer_to_list(KeyVal)))/binary>>;
468 |         true ->
469 |             Add = <<(list_to_binary(integer_to_list(KeyVal)))/binary,"_">>;
470 |         false ->
471 |             Add = <<>>
472 |     end,
473 |     gen_prop_keyname(T, <<Bin/binary, KeyName/binary, "_", Add/binary>>);
474 | gen_prop_keyname([], B) ->
475 |     B.
476 |
477 | gen_keyname(Rec, Keys) ->
478 |     % [_|Fields] = element(element(2, Rec), ?RECTABLE),
479 |     Fields = recfields(Rec),
480 |     gen_keyname(Keys, Fields, recoffset(Rec), <<>>).
481 |
482 | gen_keyname([{KeyIndex, KeyVal}|Keys], [Field|Fields], KeyIndex, Name) ->
483 |     case Field of
484 |         {FieldName, _} ->
485 |             true;
486 |         FieldName ->
487 |             true
488 |     end,
489 |     case is_integer(KeyVal) of
490 |         true when Keys == [] ->
491 |             Add = <<(list_to_binary(integer_to_list(KeyVal)))/binary>>;
492 |         true ->
493 |             Add = <<(list_to_binary(integer_to_list(KeyVal)))/binary,"_">>;
494 |         false ->
495 |             Add = <<>>
496 |     end,
497 |     gen_keyname(Keys, Fields, KeyIndex+1, <<Name/binary, "_", (atom_to_binary(FieldName, latin1))/binary, "_", Add/binary>>);
498 | gen_keyname([], _, _, <<"_", Name/binary>>) ->
499 |     Name;
500 | gen_keyname(Keys, [_|Fields], KeyIndex, Name) ->
501 |     % [{I,_}|_] = Keys,
502 |     gen_keyname(Keys, Fields, KeyIndex+1, Name).
503 |
504 |
505 | decoderec(Rec, <<>>) ->
506 |     % Rec;
507 |     erlang:make_tuple(tuple_size(Rec), undefined, [{1, element(1,Rec)}, {2, element(2,Rec)}]);
508 | decoderec(Rec, Bin) ->
509 |     % [_|Fields] = element(element(2, Rec), ?RECTABLE),
510 |     Fields = recfields(Rec),
511 |     case recoffset(Rec) of
512 |         3 ->
513 |             decode_records([], Bin, tuple_size(Rec), element(1,Rec), element(2, Rec), Fields);
514 |         _ ->
515 |             decode_records([], Bin, tuple_size(Rec), element(1,Rec), undefined, Fields)
516 |     end.
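% Hex helper sketch (based on dec2hex/2, hex2dec/2 and hex0/dec0 above; hand-checked,
% not repo output):
% 1> bson:dec2hex(<<>>, <<255,0>>).
% <<"ff00">>
% 2> bson:hex2dec(<<>>, <<"ff00">>).
% <<255,0>>
% addUser/3 in mongoapi.erl uses dec2hex/2 the same way to hex-encode an md5 digest.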
517 |
518 |
519 | decode_records(RecList, <<_ObjSize:32/little, Bin/binary>>, TupleSize, Name, TabIndex, Fields) ->
520 |     case TabIndex of
521 |         undefined ->
522 |             {FieldList, Remain} = get_fields([], Fields, 2, Bin),
523 |             NewRec = erlang:make_tuple(TupleSize, undefined, [{1, Name}|FieldList]);
524 |         _ ->
525 |             {FieldList, Remain} = get_fields([], Fields, 3, Bin),
526 |             NewRec = erlang:make_tuple(TupleSize, undefined, [{1, Name},{2, TabIndex}|FieldList])
527 |     end,
528 |     decode_records([NewRec|RecList], Remain, TupleSize, Name, TabIndex, Fields);
529 | decode_records(R, <<>>, _, _, _, _) ->
530 |     lists:reverse(R).
531 |
532 | get_fields(RecVals, Fields, Offset, Bin) ->
533 |     case rec_field_list(RecVals, Offset, Fields, Bin) of
534 |         {again, SoFar, Rem} ->
535 |             get_fields(SoFar, Fields, Offset, Rem);
536 |         Res ->
537 |             Res
538 |     end.
539 |
540 | rec_field_list(RecVals, _, _, <<0:8, Rem/binary>>) ->
541 |     {RecVals, Rem};
542 | % done;
543 | rec_field_list(RecVals, _, [], <<Type:8, Bin/binary>>) ->
544 |     {_Name, ValRem} = decode_cstring(Bin, <<>>),
545 |     {_Value, Remain} = decode_value(proplist,Type, ValRem),
546 |     {again, RecVals, Remain};
547 | rec_field_list(RecVals, N, [Field|Fields], <<Type:8, Bin/binary>>) ->
548 |     % io:format("~p~n", [Field]),
549 |     {Name, ValRem} = decode_cstring(Bin, <<>>),
550 |     case Field of
551 |         docid ->
552 |             BinName = <<"_id">>;
553 |         {Fn, _} ->
554 |             BinName = atom_to_binary(Fn, latin1);
555 |         Fn ->
556 |             BinName = atom_to_binary(Fn, latin1)
557 |     end,
558 |     case BinName of
559 |         Name ->
560 |             case Field of
561 |                 {RecName, RecIndex} ->
562 |                     <<LRecSize:32/little, RecObj/binary>> = ValRem,
563 |                     RecSize = LRecSize - 4,
564 |                     <<RecBin:RecSize/binary, Remain/binary>> = RecObj,
565 |                     case is_integer(RecIndex) of
566 |                         true ->
567 |                             [_|RecFields] = element(RecIndex, ?RECTABLE),
568 |                             RecLen = length(RecFields)+2;
569 |                         false ->
570 |                             RecFields = recfields(RecName),
571 |                             RecLen = length(RecFields)+recoffset(RecName)-1
572 |                     end,
573 |                     [Value] = decode_records([], <<LRecSize:32/little, RecBin/binary>>, RecLen,
574 |                         RecName, RecIndex, RecFields),
575 |                     rec_field_list([{N, Value}|RecVals], N+1, Fields, Remain);
576 |                 _ ->
577 |                     {Value, Remain} = decode_value(proplist,Type, ValRem),
578 |                     rec_field_list([{N, Value}|RecVals], N+1, Fields, Remain)
579 |                     % case Value of
580 |                     %     {oid, V} ->
581 |                     %         rec_field_list([{N, V}|RecVals], N+1, Fields, Remain);
582 |                     %     _ ->
583 |                     %         rec_field_list([{N, Value}|RecVals], N+1, Fields, Remain)
584 |                     % end
585 |             end;
586 |         _ ->
587 |             rec_field_list(RecVals, N+1, Fields, <<Type:8, Bin/binary>>)
588 |     end.
589 |
--------------------------------------------------------------------------------
/src/erlmongo.app.src:
--------------------------------------------------------------------------------
1 | {application, erlmongo, [{description, "Erlang driver for mongodb"},
2 | {vsn, "1.0"},
3 | {modules, []},
4 | {registered, [mongodb, mongodb_supervisor]},
5 | {applications, [kernel, stdlib]},
6 | {mod, {erlmongo_app, []}},
7 | {start_phases, []}
8 | ]}.
9 |
--------------------------------------------------------------------------------
/src/erlmongo.hrl:
--------------------------------------------------------------------------------
1 | % metadata is an embedded document,
2 | -record(gfs_file, {recindex = 3, docid, filename, contentType, length, chunkSize, uploadDate, aliases, metadata, md5}).
3 | -record(gfs_chunk, {recindex = 4, docid, files_id, n, data}).
4 | % A table of records used with mongodb (tuple of record fields).
5 | % If you aren't using an embedded record, you can use record_info(fields, name_of_record).
6 | % If a record uses an embedded record, you have to write the fields yourself,
7 | % and the field which is an embedded record is: {name_of_record, index_of_record_in_RECTABLE}
8 | % The field name also has to match the record name.
9 | -define(RECTABLE, {[recindex,docid,name,i, {address, 2}, tags],
10 | % If you wish to use the metadata embedded record:
11 | % [recindex, docid, filename, contentType, length, chunkSize, uploadDate, aliases, {metadata, INDEX_HERE}, md5]
12 | record_info(fields, gfs_file),
13 | record_info(fields, gfs_chunk)}).
14 |
15 | -record(gfs_state,{pool,proc, db, file, collection, coll_name, length = 0, mode,
16 | nchunk = 0, flush_limit = 1048576, closed = false}).
17 |
18 | -ifdef(DEBUG).
19 | -define(DBG(Format, Args), io:format("L(~p:~p:~p:~p) : "++Format++"~n", [time(),self(),?MODULE,?LINE]++Args)).
20 | -define(DBG0(Format), io:format("L(~p:~p:~p:~p) : "++Format++"~n", [time(),self(),?MODULE,?LINE])).
21 | -else.
22 | -define(DBG(F,A),[]).
23 | -define(DBG0(F),[]).
24 | -endif.
25 |
26 | % mongo
27 | -define(QUER_OPT_NONE, 0).
28 | -define(QUER_OPT_CURSOR, 1).
29 | -define(QUER_OPT_SLAVEOK, 4).
30 | -define(QUER_OPT_NOTIMEOUT, 16).
31 |
32 | % criteria = either a record or proplist with the parameters you are searching by
33 | % field_selector = list of fields you wish to return
34 | % ndocs = how many documents you wish to return, 0 = default
35 | % nskip = how many documents to skip
36 | % opts - Don't touch it.
37 | -record(search, {ndocs = 0, nskip = 0, criteria = <<>>, field_selector = <<>>, opts = ?QUER_OPT_NONE}).
38 | -record(cursor, {id, pid, limit = 0}).
39 | -record(update, {upsert = 1, selector = <<>>, document = <<>>}).
40 | -record(insert, {documents = []}).
41 | -record(delete, {selector = <<>>}).
42 | -record(killc, {cur_ids = <<>>}).
43 |
--------------------------------------------------------------------------------
/src/erlmongo_app.erl:
--------------------------------------------------------------------------------
1 | -module(erlmongo_app).
2 | -behavior(application).
3 | -export([start/2, stop/1]).
4 |
5 |
6 | start(_Type, _Args) ->
7 |     mongodb_supervisor:start_link().
8 |
9 |
10 | stop(_State) ->
11 |     ok.
12 |
--------------------------------------------------------------------------------
/src/mongoapi.erl:
--------------------------------------------------------------------------------
1 | -module(mongoapi).
2 | -compile(nowarn_export_all).
3 | -compile(export_all).
4 | -include_lib("erlmongo.hrl").
5 |
6 | new(Pool,DB) ->
7 |     {?MODULE,[Pool,DB]}.
8 |
9 | set_encode_style(mochijson,{?MODULE,[Pool,DB]}) ->
10 |     put({Pool, DB, style}, mochijson);
11 | set_encode_style(default,{?MODULE,[Pool,DB]}) ->
12 |     put({Pool, DB, style}, default).
13 |
14 | set_encode_style({?MODULE,[Pool,DB]}) ->
15 |     put({Pool, DB, style}, default).
16 |
17 |
18 | name([_|_] = Collection,PMI) ->
19 |     name(list_to_binary(Collection), PMI);
20 | name(<<_/binary>> = Collection,{?MODULE,[_Pool,DB]}) ->
21 |     <<DB/binary, ".", Collection/binary>>;
22 | name(Collection,PMI) when is_atom(Collection) ->
23 |     name(atom_to_binary(Collection, latin1),PMI).
24 |
25 | remove(Col, Selector, {?MODULE,[Pool,DB]}) ->
26 |     mongodb:exec_delete(Pool,name(Col,{?MODULE,[Pool,DB]}), #delete{selector = bson:encode(Selector)}).
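% Usage sketch (assumptions: pool `def` is connected and Mong = mongoapi:new(def,<<"test">>)):
% mongoapi:remove("mycollection", #{i => 10}, Mong).
% remove_sync/3 below runs the delete as a command and returns ok or {error,_}.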
27 | remove_sync(Col, Selector, {?MODULE,[Pool,DB]}) ->
28 |     case runCmd([{delete, Col}, {deletes, {array,[[{q, Selector},{limit,0}]]}}],{?MODULE,[Pool,DB]}) of
29 |         [_|_] = Obj ->
30 |             case [lists:keyfind(Find,1,Obj) || Find <- [<<"ok">>] ] of
31 |                 [{_,OK}] when OK > 0 ->
32 |                     ok;
33 |                 Invalid ->
34 |                     {error,Invalid}
35 |             end;
36 |         E ->
37 |             E
38 |     end.
39 |
40 |
41 | save(Collection, L, {?MODULE,[Pool,DB]}) ->
42 |     % Style = case get({Pool, DB, style}) of
43 |     %     undefined -> default;
44 |     %     T -> T
45 |     % end,
46 |     case getid(L) of
47 |         false ->
48 |             OID = mongodb:create_id(),
49 |             case L of
50 |                 #{} ->
51 |                     L1 = L#{<<"_id">> => {oid, OID}};
52 |                 _ ->
53 |                     L1 = [{<<"_id">>, {oid, OID}}|L]
54 |             end,
55 |             case insert(Collection, L1, {?MODULE,[Pool,DB]}) of
56 |                 ok ->
57 |                     {ok,{oid, OID}};
58 |                 badquery ->
59 |                     throw({error,{badquery,Collection,L}});
60 |                 R ->
61 |                     R
62 |             end;
63 |         {_, OID} ->
64 |             Sel = [{<<"_id">>, OID}],
65 |             case update(Collection,Sel, L, [upsert], {?MODULE,[Pool,DB]}) of
66 |                 ok ->
67 |                     {ok,OID};
68 |                 badquery ->
69 |                     throw({error,{badquery,Collection,L}});
70 |                 R ->
71 |                     R
72 |             end
73 |     end.
74 |
75 | getid(L) when is_list(L) ->
76 |     lists:keyfind(<<"_id">>,1,L);
77 | getid(#{} = L) ->
78 |     case maps:get(<<"_id">>,L,false) of
79 |         false ->
80 |             false;
81 |         Id ->
82 |             {<<"_id">>,Id}
83 |     end.
84 |
85 |
86 | % Examples:
87 | % update([{#mydoc.name, "docname"}], #mydoc{name = "different name"}, [upsert])
88 | % update([{#mydoc.name, "docname"}], #mydoc{i = {inc, 1}}, [upsert])
89 | % update([{#mydoc.name, "docname"}], #mydoc{tags = {push, "lamer"}}, [])
90 | % update([{#mydoc.name, "docname"}], #mydoc{tags = {pushAll, ["dumbass","jackass"]}}, [upsert])
91 | % update([{#mydoc.name, "docname"}], #mydoc{tags = {pullAll, ["dumbass","jackass"]}}, [upsert])
92 | % update([{#mydoc.name,"ime"}],#mydoc{i = {addToSet,{array,[1,2,3,4]}}},[upsert])
93 | % update([{#mydoc.name,"ime"}],#mydoc{i = {addToSet,10}},[upsert]).
94 | % and so on.
95 | % modifier list: inc, set, push, pushAll, pop, pull, pullAll
96 | % Flags can be: [upsert,multi]
97 | update(Col, Selector, Doc, Flags,PMI) ->
98 |     run_update(Col, [Selector], [Doc], Flags,PMI).
99 | batchUpdate(Col, Selectors, Docs, Flags, PMI) ->
100 |     run_update(Col, Selectors, Docs, Flags,PMI).
101 | updateAsync(Collection, Selector, Doc, Flags,{?MODULE,[Pool,DB]}) ->
102 |     mongodb:exec_update(Pool,name(Collection,{?MODULE,[Pool,DB]}), #update{selector = bson:encode(Selector), document = bson:encode(Doc), upsert = updateflags(Flags,0)}).
103 |
104 | run_update(Col, Sels, Docs, Flags,PMI) ->
105 |     [Upsert,Multi] = [lists:member(S,Flags) || S <- [upsert,multi]],
106 |     L = [[{q, Sel}, {u, Doc}, {upsert, Upsert}, {multi, Multi}] || {Sel, Doc} <- lists:zip(Sels,Docs)],
107 |     case runCmd([{update, Col}, {updates, {array,L}}],PMI) of
108 |         [_|_] = Obj ->
109 |             % io:format("Update ~p~n",[Obj]),
110 |             case [lists:keyfind(Find,1,Obj) || Find <- [<<"ok">>,<<"n">>] ] of
111 |                 [{_,OK},{_,N}] when OK > 0, N > 0 ->
112 |                     ok;
113 |                 Invalid ->
114 |                     {error,Invalid}
115 |             end;
116 |         E ->
117 |             % io:format("update ERROR ~p~n",[E]),
118 |             E
119 |     end.
120 |
121 | batchUpdateAsync(Col, [_|_] = Selector, [_|_] = Doc, Flags,{?MODULE,[Pool,DB]}) ->
122 |     mongodb:exec_update(Pool,name(Col,{?MODULE,[Pool,DB]}),encbu([],Selector,Doc,updateflags(Flags,0))).
123 | encbu(L, [Sel|ST],[[_|_] = Doc|DT],Flags) ->
124 |     encbu([#update{selector = bson:encode(Sel), document = bson:encode(Doc), upsert = Flags}|L],ST,DT,Flags);
125 | encbu(L,[],[],_) ->
126 |     L.
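% Update sketch (same assumptions as above; {inc, N} is encoded as MongoDB's $inc):
% mongoapi:update("mycollection", #{name => <<"MyDocument">>}, #{i => {inc, 1}}, [multi], Mong).
% Flags can be [upsert], [multi], both, or [].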
127 |
128 | updateflags([upsert|T],V) ->
129 |     updateflags(T,V bor 1);
130 | updateflags([multi|T],V) ->
131 |     updateflags(T,V bor 2);
132 | updateflags([], V) ->
133 |     V.
134 |
135 | insert(Col, L, PMI) ->
136 |     run_insert(Col,[L], PMI).
137 | insertAsync(Col, L, {?MODULE,[Pool,DB]}) ->
138 |     mongodb:exec_insert(Pool,name(Col,{?MODULE,[Pool,DB]}), #insert{documents = bson:encode(L)}).
139 |
140 |
141 | run_insert(Col,L,PMI) ->
142 |     case runCmd([{insert, Col}, {documents, {array,L}}],PMI) of
143 |         [_|_] = Obj ->
144 |             case [lists:keyfind(Find,1,Obj) || Find <- [<<"ok">>,<<"n">>] ] of
145 |                 [{_,OK},{_,N}] when OK > 0, N > 0 ->
146 |                     ok;
147 |                 Invalid ->
148 |                     {error,Invalid}
149 |             end;
150 |         E ->
151 |             % io:format("Insert ERROR ~p~n",[E]),
152 |             E
153 |     end.
154 |
155 | batchInsert(Col, [_|_] = LRecs, PMI) ->
156 |     run_insert(Col, LRecs, PMI).
157 | batchInsertAsync(Col, [_|_] = LRecs,{?MODULE,[Pool,DB]}) ->
158 |     DocBin = lists:foldl(fun(L, Bin) -> <<Bin/binary, (bson:encode(L))/binary>> end, <<>>, LRecs),
159 |     mongodb:exec_insert(Pool,name(Col,{?MODULE,[Pool,DB]}), #insert{documents = DocBin}).
160 |
161 | % Advanced queries:
162 | % Regex: Mong:find(#mydoc{name = {regex, "(.+?)\.flv", "i"}}, undefined,0,0)
163 | % Documents with even i: Mong:find(#mydoc{i = {mod, 2, 0}}, undefined, 0,0).
164 | % Documents with i larger than 2: Mong:find(#mydoc{i = {gt, 2}}, undefined, 0,0).
165 | % Documents with i between 2 and 5: Mong:find(#mydoc{i = {in, {gt, 2}, {lt, 5}}}, undefined, 0,0).
166 | % in example: Mong:find(#mydoc{tags = {in, [2,3,4]}}, undefined, 0,0).
167 | % exists example: Mong:find(#mydoc{tags = {exists, false}}, undefined, 0,0).
168 | % Advanced query operators: gt,lt,gte,lte, ne, in, nin, all, size, exists,'not'
169 | % Possible regex options: "ilmsux" -> IN THIS SEQUENCE! (not all are necessary of course)
170 | % i case-insensitive matching
171 | % m multiline: "^" and "$" match the beginning / end of each line as well as the whole string
172 | % x verbose / comments: the pattern can contain comments
173 | % l (lowercase L) locale: \w, \W, etc. depend on the current locale
174 | % s dotall: the "." character matches everything, including newlines
175 | % u unicode: \w, \W, etc. match unicode
176 | findOne(Col, Q, {?MODULE,[Pool,DB]}) ->
177 |     findOne(Col, Q, proplist, {?MODULE,[Pool,DB]}).
178 | findOne(Col, [], Format, {?MODULE,[Pool,DB]}) ->
179 |     case find(Col, [], undefined, 0, 1,Format,{?MODULE,[Pool,DB]}) of
180 |         {ok, [Res]} -> {ok, Res};
181 |         {ok, []} -> {ok, []};
182 |         R ->
183 |             R
184 |     end;
185 | findOne(Col, Query, Format,{?MODULE,[Pool,DB]}) when Format == proplist; Format == map ->
186 |     case find(Col, Query, undefined, 0, 1, Format,{?MODULE,[Pool,DB]}) of
187 |         {ok, [Res]} -> {ok, Res};
188 |         {ok, []} -> {ok, []};
189 |         R ->
190 |             R
191 |     end;
192 | findOne(Col, Query, Selector,{?MODULE,[Pool,DB]}) ->
193 |     findOne(Col, Query, Selector,proplist,{?MODULE,[Pool,DB]}).
194 | findOne(Col, Query, Selector,Format,{?MODULE,[Pool,DB]}) ->
195 |     case find(Col, Query, Selector, 0, 1, Format,{?MODULE,[Pool,DB]}) of
196 |         {ok, [Res]} -> {ok, Res};
197 |         {ok, []} -> {ok, []};
198 |         R ->
199 |             R
200 |     end.
201 |
202 | find(Col, #search{} = Q,{?MODULE,[Pool,DB]}) ->
203 |     find(Col, Q#search.criteria, Q#search.field_selector, Q#search.nskip, Q#search.ndocs,{?MODULE,[Pool,DB]}).
204 |
205 | % Format = [proplist | map]
206 | find(Col, Query, Selector, From, Limit,{?MODULE,[Pool,DB]}) ->
207 |     find(Col, Query, Selector, From, Limit, proplist,{?MODULE,[Pool,DB]}).
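% Insert sketch (same assumptions as above): one document, or a batch in one command.
% mongoapi:insert("mycollection", #{a => 1}, Mong).
% mongoapi:batchInsert("mycollection", [#{a => 1}, #{a => 2}], Mong).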
208 | find(Col, Query, Selector, From, Limit,Format,{?MODULE,[Pool,DB]}) ->
209 |     TStart = erlang:monotonic_time(millisecond),
210 |     QueryBin = bson:encode(Query),
211 |     QuerySize = iolist_size(QueryBin),
212 |     Quer = #search{ndocs = Limit, nskip = From, criteria = QueryBin, field_selector = bson:encode(Selector)},
213 |     case mongodb:exec_find(Pool,name(Col,{?MODULE,[Pool,DB]}), Quer) of
214 |         not_connected ->
215 |             not_connected;
216 |         <<>> ->
217 |             {ok, []};
218 |         {ok,Tracing,Res} ->
219 |             BsonSize = iolist_size(Res),
220 |             Decoded = bson:decode(Format,Res),
221 |             TEnd = erlang:monotonic_time(millisecond),
222 |             TracingQ = Tracing#{col => Col, q => Query, q_size => QuerySize,
223 |                 selector => Selector, from => From, limit => Limit,
224 |                 bson_resp_bytes => BsonSize},
225 |             report(TEnd - TStart, TracingQ),
226 |             {ok, Decoded}
227 |     end.
228 |
229 | report(Time,Obj) ->
230 |     case application:get_env(erlmongo,tracing_mod) of
231 |         {ok,Mod} ->
232 |             apply(Mod, mongo_query_done, [Time, Obj]);
233 |         _ ->
234 |             ok
235 |     end.
236 |
237 | % opts: [[map | proplist], reverse, {sort, SortBy}, explain, {hint, Hint}, snapshot]
238 | % SortBy: {key, Val} or a list of keyval tuples -> {i,1} (1 = ascending, -1 = descending)
239 | % Hint: [{Key,Val}] -> [{#mydoc.i,1}]
240 | findOpt(Col, Query, Selector, Opts, From, Limit,{?MODULE,[Pool,DB]}) ->
241 |     case Query of
242 |         [] ->
243 |             {_,Format,Q} = translateopts(false,undefined, Opts,[{<<"query">>, {bson,bson:encode([])}}],proplist);
244 |         _ ->
245 |             {_,Format,Q} = translateopts(false,undefined, Opts,[{<<"query">>, Query}],proplist)
246 |     end,
247 |     find(Col, Q, Selector, From, Limit,Format,{?MODULE,[Pool,DB]}).
248 |
249 |
250 | findOpt(Col, #search{} = Q, Opts,{?MODULE,[Pool,DB]}) ->
251 |     findOpt(Col, Q#search.criteria, Q#search.field_selector, Opts, Q#search.nskip, Q#search.ndocs,{?MODULE,[Pool,DB]}).
252 |
253 | cursor(Col,Query, Selector, Opts, From, Limit,{?MODULE,[Pool,DB]}) ->
254 |     cursor(Col,Query, Selector, Opts, From, Limit, proplist,{?MODULE,[Pool,DB]}).
255 | cursor(Col,Query, Selector, Opts, From, Limit,Format,{?MODULE,[Pool,DB]}) ->
256 |     {_,Format,Q} = translateopts(false,Query, Opts,[{<<"query">>, {bson, bson:encode(Query)}}], Format),
257 |     Quer = #search{ndocs = Limit, nskip = From, field_selector = bson:encode(Selector),
258 |         criteria = bson:encode(Q),opts = ?QUER_OPT_CURSOR},
259 |     case mongodb:exec_cursor(Pool,name(Col,{?MODULE,[Pool,DB]}), Quer) of
260 |         not_connected ->
261 |             not_connected;
262 |         {done, <<>>} ->
263 |             {done, []};
264 |         {done, Result} ->
265 |             {done, bson:decode(Format,Result)};
266 |         {Cursor, Result} ->
267 |             {ok, Cursor, bson:decode(Format,Result)}
268 |     end.
269 |
270 | getMore(Rec, Cursor,{?MODULE,[Pool,_DB]}) when is_list(Rec); is_binary(Rec) ->
271 |     case mongodb:exec_getmore(Pool,Rec, Cursor) of
272 |         not_connected ->
273 |             not_connected;
274 |         {done, <<>>} ->
275 |             {done, []};
276 |         {done, Result} ->
277 |             {done, bson:decode(Result)};
278 |         {ok, Result} ->
279 |             {ok, bson:decode(Result)}
280 |     end.
281 | closeCursor(Cur,_PMI) ->
282 |     Cur#cursor.pid ! {cleanup},
283 |     ok.
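% Cursor walk sketch (same assumptions as above; getMore returns {ok,_} while more
% batches remain and {done,_} on the last one):
% {ok, Cursor, FirstBatch} = mongoapi:cursor("mycollection", #{}, undefined, [], 0, 0, Mong),
% mongoapi:getMore("mycollection", Cursor, Mong),
% mongoapi:closeCursor(Cursor, Mong).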
284 | 
285 | translateopts(H,Q, [proplist|T], L, _F) ->
286 | 	translateopts(H,Q, T, L, proplist);
287 | translateopts(H,Q, [map|T], L, _F) ->
288 | 	translateopts(H,Q, T, L, map);
289 | translateopts(H,undefined, [{sort, SortBy}|T], L,F) when is_list(SortBy); is_map(SortBy) ->
290 | 	translateopts(H,undefined, T, [{<<"orderby">>, SortBy}|L],F);
291 | translateopts(H,undefined, [{sort, {Key,Val}}|T], L,F) ->
292 | 	translateopts(H,undefined, T, [{<<"orderby">>, [{Key,Val}]}|L],F);
293 | translateopts(H,Rec, [reverse|T], L,F) ->
294 | 	translateopts(H,Rec, T, [{<<"orderby">>, [{<<"$natural">>, -1}]}|L],F);
295 | translateopts(_,Rec, [explain|T], L,F) ->
296 | 	translateopts(true,Rec, T, [{<<"$explain">>, true}|L],F);
297 | translateopts(H,Rec, [snapshot|T], L,F) ->
298 | 	translateopts(H,Rec, T, [{<<"$snapshot">>, true}|L],F);
299 | translateopts(H,undefined, [{hint, Hint}|T], L,F) ->
300 | 	translateopts(H,undefined, T, [{<<"$hint">>, [{Hint, 1}]}|L],F);
301 | translateopts(H,_, [], L,F) ->
302 | 	{H,F,L}.
303 | 
304 | % If you wish to index on an embedded document, use proplists.
305 | % Example: ensureIndex(<<"mydoc">>, [{<<"name">>, 1}]).
306 | %          ensureIndex(<<"mydoc">>,[{<<"name">>,1}],[{"unique",true}]).
307 | % You can use lists, they will be turned into binaries.
308 | ensureIndex(Collection, Keys,{?MODULE,[Pool,DB]}) ->
309 | 	ensureIndex(Collection,Keys,[],{?MODULE,[Pool,DB]}).
310 | ensureIndex([_|_] = Collection, Keys,Opts,{?MODULE,[Pool,DB]}) ->
311 | 	ensureIndex(list_to_binary(Collection), Keys,Opts,{?MODULE,[Pool,DB]});
312 | ensureIndex(<<_/binary>> = Collection, Keys,Opts,{?MODULE,[Pool,DB]}) ->
313 | 	Obj = [{plaintext, <<"name">>, bson:gen_prop_keyname(Keys, <<>>)},
314 | 		{plaintext, <<"ns">>, name(Collection,{?MODULE,[Pool,DB]})},
315 | 		{<<"key">>, {bson, bson:encode(Keys)}}|Opts],
316 | 	Bin = bson:encode(Obj),
317 | 	mongodb:ensureIndex(Pool,DB, Bin).
318 | % Example: ensureIndex(#mydoc{}, [{#mydoc.name, 1}])
319 | 
320 | deleteIndexes([_|_] = Collection,{?MODULE,[Pool,DB]}) ->
321 | 	deleteIndexes(list_to_binary(Collection),{?MODULE,[Pool,DB]});
322 | deleteIndexes(<<_/binary>> = Collection,{?MODULE,[Pool,DB]}) ->
323 | 	mongodb:clearIndexCache(),
324 | 	mongodb:exec_cmd(Pool,DB, [{plaintext, <<"deleteIndexes">>, Collection}, {plaintext, <<"index">>, <<"*">>}]).
325 | 
326 | deleteIndex(Rec, Key,{?MODULE,[Pool,DB]}) ->
327 | 	mongodb:clearIndexCache(),
328 | 	mongodb:exec_cmd(Pool,DB,[{plaintext, <<"deleteIndexes">>, atom_to_binary(element(1,Rec), latin1)},
329 | 		{plaintext, <<"index">>, bson:gen_keyname(Rec,Key)}]).
330 | 
331 | % How many documents in mydoc collection: Mong:count("mydoc").
332 | %  Mong:count(#mydoc{}).
333 | % How many documents with i larger than 2: Mong:count(#mydoc{i = {gt, 2}}).
334 | count(Col,{?MODULE,[Pool,DB]}) ->
335 | 	count(Col,undefined,{?MODULE,[Pool,DB]}).
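% Proplist queries also work with count (sketch): Mong:count("mydoc", [{<<"i">>, {gt, 2}}]).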
336 | count(ColIn, Query,{?MODULE,[Pool,DB]}) ->
337 | 	case true of
338 | 		_ when is_list(ColIn) ->
339 | 			Col = list_to_binary(ColIn);
340 | 		_ when is_tuple(ColIn) ->
341 | 			Col = atom_to_binary(element(1,ColIn), latin1);
342 | 		_ ->
343 | 			Col = ColIn
344 | 	end,
345 | 	case true of
346 | 		_ when is_list(Query) ->
347 | 			Cmd = [{plaintext, <<"count">>, Col}, {plaintext, <<"ns">>, DB}, {<<"query">>, {bson, bson:encode(Query)}}];
348 | 		_ when is_tuple(ColIn), Query == undefined ->
349 | 			Cmd = [{plaintext, <<"count">>, Col}, {plaintext, <<"ns">>, DB}, {<<"query">>, {bson, bson:encoderec(ColIn)}}];
350 | 		_ when is_tuple(ColIn), is_tuple(Query) ->
351 | 			Cmd = [{plaintext, <<"count">>, Col}, {plaintext, <<"ns">>, DB}, {<<"query">>, {bson, bson:encoderec(Query)}}];
352 | 		_ when Query == undefined ->
353 | 			Cmd = [{plaintext, <<"count">>, Col}, {plaintext, <<"ns">>, DB}]
354 | 	end,
355 | 	case mongodb:exec_cmd(Pool,DB, Cmd) of
356 | 		[_|_] = Obj ->
357 | 			case proplists:get_value(<<"n">>,Obj) of
358 | 				undefined ->
359 | 					0;
360 | 				Val ->
361 | 					round(Val)
362 | 			end;
363 | 		_ ->
364 | 			0
365 | 	end.
366 | 
367 | 
368 | addUser(U, P,PMI) when is_binary(U) ->
369 | 	addUser(binary_to_list(U),P,PMI);
370 | addUser(U, P,PMI) when is_binary(P) ->
371 | 	addUser(U,binary_to_list(P),PMI);
372 | addUser(Username, Password,PMI) ->
373 | 	save(<<"system.users">>, [{<<"user">>, Username},
374 | 		{<<"pwd">>, bson:dec2hex(<<>>, erlang:md5(Username ++ ":mongo:" ++ Password))}],PMI).
375 | 
376 | % Collection: name of collection
377 | % Key (list of fields): [{"i", 1}]
378 | % Reduce: {code, "JS code", Parameters} -> Parameters can be []
379 | % Initial: default values for output object [{"result",0}]
380 | % Optional: [{"$keyf", {code, "JScode",Param}}, {"cond", CondObj}, {"finalize", {code,_,_}}]
381 | % Example: Mong:group("file",[{"ch",1}], {code, "function(doc,out){out.size += doc.size}", []}, [{"size", 0}],[]).
382 | group(Collection, Key,Reduce,Initial,Optional,PMI) ->
383 | 	runCmd([{"group", [{<<"ns">>, Collection},
384 | 		{<<"key">>,Key},
385 | 		{<<"$reduce">>, Reduce},
386 | 		{<<"initial">>, Initial}|Optional]}],PMI).
387 | 
388 | % Mong:eval("function(){return 3+3;}").
389 | % Mong:eval({code, "function(){return what+3;}",[{"what",5}]}).
390 | eval(Code,PMI) ->
391 | 	runCmd([{<<"$eval">>, Code}],PMI).
392 | 
393 | 
394 | % Runs $cmd. The parameter can also be a plain string; it is converted into [{String, 1}].
395 | runCmd({_,_} = T,PMI) ->
396 | 	runCmd([T],PMI);
397 | runCmd([{_,_}|_] = L,{?MODULE,[Pool,DB]}) ->
398 | 	mongodb:exec_cmd(Pool,DB, L);
399 | runCmd([H|_] = L,{?MODULE,[Pool,DB]}) when is_map(H) ->
400 | 	mongodb:exec_cmd(Pool,DB, L);
401 | runCmd([_|_] = L,PMI) ->
402 | 	runCmd([{L,1}],PMI);
403 | runCmd(<<_/binary>> = L,PMI) ->
404 | 	runCmd(binary_to_list(L),PMI).
405 | 
406 | stats(C,PMI) when is_tuple(C) ->
407 | 	stats(atom_to_binary(element(1,C),latin1),PMI);
408 | stats(Collection,PMI) ->
409 | 	runCmd([{"collstats", Collection}],PMI).
410 | 
411 | repairDatabase(PMI) ->
412 | 	runCmd([{"repairDatabase", 1}],PMI).
413 | dropDatabase(PMI) ->
414 | 	runCmd([{"dropDatabase", 1}],PMI).
415 | cloneDatabase(From,PMI) when is_list(From); is_binary(From) ->
416 | 	runCmd([{"clone", From}],PMI).
417 | 
418 | dropCollection(C,P) when is_tuple(C) ->
419 | 	dropCollection(atom_to_binary(element(1,C),latin1),P);
420 | dropCollection(Collection,PMI) ->
421 | 	mongodb:clearIndexCache(),
422 | 	runCmd([{"drop", Collection}],PMI).
423 | 
424 | createCollection(Name,{?MODULE,[Pool,DB]}) ->
425 | 	createCollection(Name, [],{?MODULE,[Pool,DB]}).
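% Sketch: Mong:createCollection(<<"log">>, [capped, {size, 1048576}, {max, 1000}]) creates
% a capped collection; the recognized options are listed below.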
426 | % Options: idindex, noidindex, capped, {size, MaxSizeBytes}, {max, MaxElements}
427 | createCollection(Name, L,{?MODULE,[Pool,DB]}) when is_tuple(Name) ->
428 | 	createCollection(atom_to_binary(element(1,Name), latin1), L,{?MODULE,[Pool,DB]});
429 | createCollection(Name, L,{?MODULE,[Pool,DB]}) ->
430 | 	runCmd([{<<"create">>, Name}] ++ translatecolopts(L, []),{?MODULE,[Pool,DB]}).
431 | 
432 | translatecolopts([idindex|T], O) ->
433 | 	translatecolopts(T, [{<<"autoIndexId">>, true}|O]);
434 | translatecolopts([noidindex|T], O) ->
435 | 	translatecolopts(T, [{<<"autoIndexId">>, false}|O]);
436 | translatecolopts([capped|T], O) ->
437 | 	translatecolopts(T, [{<<"capped">>, true}|O]);
438 | translatecolopts([{size, MaxSize}|T], O) ->
439 | 	translatecolopts(T, [{<<"size">>, MaxSize}|O]);
440 | translatecolopts([{max, Max}|T], O) ->
441 | 	translatecolopts(T, [{<<"max">>, Max}|O]);
442 | translatecolopts([], O) ->
443 | 	O.
444 | 
445 | setProfilingLevel(L,{?MODULE,[Pool,DB]}) when is_integer(L) ->
446 | 	case true of
447 | 		_ when L > 0 ->
448 | 			createCollection(<<"system.profile">>, [capped, {size, 131072}],{?MODULE,[Pool,DB]});
449 | 		_ when L >= 0, L =< 2 ->
450 | 			true
451 | 	end,
452 | 	runCmd([{"profile", L}],{?MODULE,[Pool,DB]}).
453 | getProfilingLevel(PMI) ->
454 | 	runCmd([{"profile", -1}],PMI).
455 | 
456 | %
457 | % Run this before writing any files, or writing will fail!
458 | %
459 | gfsIndexes(PMI) ->
460 | 	gfsIndexes(<<"fd">>,PMI).
461 | gfsIndexes(Collection,PMI) ->
462 | 	ensureIndex(<<Collection/binary, ".chunks">>,[{<<"files_id">>,1},{<<"n">>,1}],PMI),
463 | 	ensureIndex(<<Collection/binary, ".files">>,[{<<"filename">>,1}],PMI).
464 | 
465 | gfsNew(Filename,PMI) ->
466 | 	gfsNew(<<"fd">>, Filename, [],PMI).
467 | gfsNew(Filename, Opts,PMI) ->
468 | 	gfsNew(<<"fd">>, Filename, Opts,PMI).
469 | gfsNew([_|_] = Collection, Filename, Opts,PMI) ->
470 | 	gfsNew(list_to_binary(Collection), Filename, Opts,PMI);
471 | gfsNew(<<_/binary>> = Collection, Filename, Opts,{?MODULE,[Pool,DB]}) ->
472 | 	mongodb:startgfs(gfsopts(Opts,#gfs_state{pool = Pool,file = #gfs_file{filename = Filename, length = 0, chunkSize = 262144,
473 | 		docid = {oid,mongodb:create_id()}, uploadDate = os:timestamp()},
474 | 		collection = name(Collection,{?MODULE,[Pool,DB]}),
475 | 		coll_name = Collection, db = DB, mode = write})).
476 | 
477 | gfsopts([{meta, Rec}|T], S) ->
478 | 	gfsopts(T, S#gfs_state{file = (S#gfs_state.file)#gfs_file{metadata = Rec}});
479 | gfsopts([{aliases, L}|T], S) ->
480 | 	gfsopts(T, S#gfs_state{file = (S#gfs_state.file)#gfs_file{aliases = {array, L}}});
481 | gfsopts([{mime, Mime}|T], S) ->
482 | 	gfsopts(T, S#gfs_state{file = (S#gfs_state.file)#gfs_file{contentType = Mime}});
483 | gfsopts([{chunkSize, Size}|T], S) ->
484 | 	gfsopts(T, S#gfs_state{file = (S#gfs_state.file)#gfs_file{chunkSize = Size}});
485 | gfsopts([{flushLimit, Limit}|T], S) ->
486 | 	gfsopts(T, S#gfs_state{flush_limit = Limit});
487 | gfsopts([_|T], S) ->
488 | 	gfsopts(T,S);
489 | gfsopts([], S) ->
490 | 	S.
491 | 
492 | gfsWrite(PID, Bin,_P) ->
493 | 	PID ! {write, Bin},
494 | 	ok.
495 | gfsFlush(PID,_P) ->
496 | 	PID ! {flush},
497 | 	ok.
498 | gfsClose(PID,_P) ->
499 | 	unlink(PID),
500 | 	PID ! {close},
501 | 	ok.
502 | 
503 | gfsOpen(R,PMI) ->
504 | 	gfsOpen(<<"fd">>, R,PMI).
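% Reading a stored file back (sketch; the filename is hypothetical):
%   PID = Mong:gfsOpen(#gfs_file{filename = <<"somefile.flv">>}),
%   Bin = Mong:gfsRead(PID, 1024),
%   Mong:gfsClose(PID).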
505 | gfsOpen([_|_] = Col, R,PMI) ->
506 | 	gfsOpen(list_to_binary(Col),R,PMI);
507 | gfsOpen(Collection, R,{?MODULE,[Pool,DB]}) ->
508 | 	case true of
509 | 		_ when R#gfs_file.docid == undefined; R#gfs_file.length == undefined; R#gfs_file.md5 == undefined ->
510 | 			Quer = #search{ndocs = 1, nskip = 0, criteria = bson:encode_findrec(R)},
511 | 			case mongodb:exec_find(Pool,name(<<Collection/binary, ".files">>,{?MODULE,[Pool,DB]}), Quer) of
512 | 				not_connected ->
513 | 					not_connected;
514 | 				<<>> ->
515 | 					[];
516 | 				{ok,_Tracing,Result} ->
517 | 					[DR] = bson:decoderec(R, Result),
518 | 					gfsOpen(Collection,DR,{?MODULE,[Pool,DB]})
519 | 			end;
520 | 		_ ->
521 | 			mongodb:startgfs(#gfs_state{pool = Pool,file = R, coll_name = Collection, collection = name(Collection,{?MODULE,[Pool,DB]}), db = DB, mode = read})
522 | 	end.
523 | 
524 | gfsRead(PID, N, _PMI) ->
525 | 	PID ! {read, self(), N},
526 | 	receive
527 | 		{gfs_bytes, Bin} ->
528 | 			Bin
529 | 	after 5000 ->
530 | 		false
531 | 	end.
532 | 
533 | gfsDelete(R,PMI) ->
534 | 	gfsDelete(<<"fd">>, R, PMI).
535 | gfsDelete([_|_] = Col, R, PMI) ->
536 | 	gfsDelete(list_to_binary(Col),R, PMI);
537 | gfsDelete(Collection, R, {?MODULE,[Pool,DB]}) ->
538 | 	case R#gfs_file.docid of
539 | 		undefined ->
540 | 			Quer = #search{ndocs = 1, nskip = 0, criteria = bson:encode_findrec(R)},
541 | 			case mongodb:exec_find(Pool,name(<<Collection/binary, ".files">>,{?MODULE,[Pool,DB]}), Quer) of
542 | 				not_connected ->
543 | 					not_connected;
544 | 				<<>> ->
545 | 					[];
546 | 				{ok,_Tracing,Result} ->
547 | 					[DR] = bson:decoderec(R, Result),
548 | 					gfsDelete(Collection,DR,{?MODULE,[Pool,DB]})
549 | 			end;
550 | 		_ ->
551 | 			% NChunks = (R#gfs_file.length div R#gfs_file.chunkSize) + 1,
552 | 			remove(<<Collection/binary, ".files">>, [{<<"_id">>, {oid, R#gfs_file.docid}}],{?MODULE,[Pool,DB]}),
553 | 			remove(<<Collection/binary, ".chunks">>, [{<<"files_id">>, {oid, R#gfs_file.docid}}],{?MODULE,[Pool,DB]})
554 | 	end.
555 | 
556 | 
557 | testw(Mong, Filename) ->
558 | 	spawn(fun() ->
559 | 		% If the calling process does gfsNew, gfsWrite and dies immediately after,
560 | 		% gfsClose is necessary. This is because of a race condition.
561 | 		% Both calls will complete before gfs gets the chance to set trap_exit to true and detect
562 | 		% the caller has died.
563 | 		{ok,Bin} = file:read_file(Filename),
564 | 		Mong:gfsIndexes(),
565 | 		PID = Mong:gfsNew(Filename),
566 | 		Mong:gfsWrite(PID,Bin),
567 | 		Mong:gfsClose(PID)
568 | 	end).
569 | 
--------------------------------------------------------------------------------
/src/mongodb.erl:
--------------------------------------------------------------------------------
1 | -module(mongodb).
2 | -export([print_info/0, start/0, stop/0, init/1, handle_call/3,
3 | 	handle_cast/2, handle_info/2, terminate/2, code_change/3]).
4 | % API
5 | -export([connect/1, connect/2, is_connected/1,deleteConnection/1,
6 | 	set_ssl/1, set_ssl/2, is_ssl/0, ssl_opts/0,
7 | 	singleServer/2, singleServer/3, singleServer/1, singleServer/5,
8 | 	masterSlave/3, masterSlave/4, masterSlave/6,
9 | 	masterMaster/3, masterMaster/4, masterMaster/6,
10 | 	replicaPairs/3, replicaPairs/4, replicaPairs/6,
11 | 	datetime_to_now/1,
12 | 	replicaSets/2, replicaSets/3, replicaSets/5,
13 | 	sharded/2, sharded/3, sharded/5,
14 | 	tracing_mod/1]).
15 | % Internal
16 | -export([exec_cursor/3, exec_delete/3, exec_cmd/3, exec_insert/3, exec_find/3, exec_update/3, exec_getmore/3,
17 | 	ensureIndex/3, clearIndexCache/0, create_id/0, startgfs/1]).
18 | -include_lib("erlmongo.hrl").
19 | % -compile(export_all).
20 | 
21 | -define(MONGO_PORT, 27017).
22 | -define(RECONNECT_DELAY, 1000).
23 | 
24 | -define(OP_REPLY, 1).
25 | -define(OP_MSG, 1000).
26 | -define(OP_UPDATE, 2001).
27 | -define(OP_INSERT, 2002).
28 | -define(OP_QUERY, 2004).
29 | -define(OP_GET_MORE, 2005).
30 | -define(OP_DELETE, 2006).
31 | -define(OP_KILL_CURSORS, 2007).
32 | 
33 | start() ->
34 | 	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
35 | 
36 | stop() ->
37 | 	gen_server:call(?MODULE, stop).
38 | 
39 | % register() ->
40 | % 	supervisor:start_child(supervisor, {?MODULE, {?MODULE, start, []}, permanent, 1000, worker, [?MODULE]}).
41 | 
42 | print_info() ->
43 | 	gen_server:cast(?MODULE, {print_info}).
44 | 
45 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
46 | %
47 | % API
48 | %
49 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
50 | connect(Pool) when is_atom(Pool) ->
51 | 	gen_server:cast(?MODULE, {start_connection, Pool, undefined}).
52 | % On query result calls Mod:mongo_query_done(Milliseconds,InfoObj)
53 | tracing_mod(Mod) ->
54 | 	application:set_env(erlmongo, tracing_mod, Mod).
55 | % Invoked once the connection is established. Callback can be:
56 | %  - {Module,Function,Params}
57 | %  - a PID, which gets a {mongodb_connected} message
58 | connect(Pool, Callback) when is_atom(Pool) andalso
59 | 		(is_pid(Callback) orelse is_tuple(Callback) andalso tuple_size(Callback) == 3) ->
60 | 	gen_server:cast(?MODULE, {start_connection, Pool, Callback}).
61 | 
62 | deleteConnection(Pool) when is_atom(Pool) ->
63 | 	gen_server:cast(?MODULE,{delete_connection,Pool}).
64 | 
65 | is_connected(Pool) when is_atom(Pool) ->
66 | 	Sz = ets:info(Pool,size),
67 | 	is_integer(Sz) andalso Sz > 0.
68 | 
69 | singleServer(Pool) ->
70 | 	singleServer(Pool, 10).
71 | singleServer(Pool, Size) when is_atom(Pool), is_integer(Size) ->
72 | 	singleServer(Pool, Size, "localhost:"++integer_to_list(?MONGO_PORT)).
73 | singleServer(Pool, Size, [_|_] = Addr) when is_atom(Pool), is_integer(Size) ->
74 | 	singleServer(Pool, Size, Addr, undefined, undefined).
75 | singleServer(Pool, Size, [_|_] = Addr, Username, Pw) when is_atom(Pool), is_integer(Size) ->
76 | 	[IP,Port] = parse_addr(Addr),
77 | 	gen_server:cast(?MODULE, {conninfo, Pool, Size, Username, Pw, [{IP,Port}, {IP,Port}]}).
78 | 
79 | masterSlave(Pool, M, S) ->
80 | 	masterSlave(Pool, 10, M, S).
81 | masterSlave(Pool, Size, [_|_] = MasterAddr, [_|_] = SlaveAddr) ->
82 | 	masterSlave(Pool, Size, MasterAddr, SlaveAddr, undefined, undefined).
83 | masterSlave(Pool, Size, [_|_] = MasterAddr, [_|_] = SlaveAddr, Username, Pw) when is_atom(Pool), is_integer(Size) ->
84 | 	[IP1,Port1] = parse_addr(MasterAddr),
85 | 	[IP2,Port2] = parse_addr(SlaveAddr),
86 | 	gen_server:cast(?MODULE, {conninfo,Pool, Size, Username, Pw, [{IP1,Port1}, {IP2,Port2}]}).
87 | 	% gen_server:cast(?MODULE, {conninfo,Pool, Size, {masterSlave, {IP1,Port1}, {IP2,Port2}}}).
88 | 
89 | masterMaster(Pool, A1, A2) ->
90 | 	masterMaster(Pool, 10, A1, A2).
91 | masterMaster(Pool,Size,[_|_] = Addr1, [_|_] = Addr2) ->
92 | 	sharded(Pool,Size,[Addr1,Addr2], undefined, undefined).
93 | masterMaster(Pool, Size, A1, A2, Us, Pw) ->
94 | 	sharded(Pool, Size, [A1, A2], Us, Pw).
95 | 
96 | sharded(Pool, L) ->
97 | 	sharded(Pool, 10, L).
98 | sharded(Pool, Size, L) ->
99 | 	sharded(Pool, Size, L, undefined, undefined).
100 | sharded(Pool, Size, [[_|_]|_] = L, Us, Pw) when is_atom(Pool), is_integer(Size) ->
101 | 	SL = [list_to_tuple(parse_addr(A)) || A <- L],
102 | 	gen_server:cast(?MODULE, {conninfo,Pool, Size, Us, Pw, SL});
103 | sharded(Pool, Size, [_|_] = L, Us, Pw) ->
104 | 	sharded(Pool, Size,[L], Us, Pw).
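% Typical pool setup (sketch; the pool name def is arbitrary):
%   mongodb:start(),             % or run mongodb_supervisor in your own tree
%   mongodb:singleServer(def),   % localhost:27017, pool size 10
%   mongodb:connect(def),
%   mongodb:is_connected(def).   % true once at least one socket is live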
105 | 
106 | replicaPairs(Pool, A1, A2) ->
107 | 	replicaPairs(Pool, 10, A1, A2).
108 | replicaPairs(Pool, Size, [_|_] = Addr1, [_|_] = Addr2) ->
109 | 	replicaPairs(Pool, Size, Addr1, Addr2, undefined, undefined).
110 | replicaPairs(Pool, Size, [_|_] = Addr1, [_|_] = Addr2, Us, Pw) when is_atom(Pool) ->
111 | 	[IP1,Port1] = parse_addr(Addr1),
112 | 	[IP2,Port2] = parse_addr(Addr2),
113 | 	% gen_server:cast(?MODULE, {conninfo,Pool, Size, {replicaPairs, {IP1,Port1}, {IP2,Port2}}}).
114 | 	gen_server:cast(?MODULE, {conninfo,Pool, Size, Us, Pw, [{IP1,Port1}, {IP2,Port2}]}).
115 | % Takes a list of "Address:Port"
116 | replicaSets(Pool,L) ->
117 | 	replicaSets(Pool,10,L).
118 | replicaSets(Pool,Size,L) ->
119 | 	replicaSets(Pool, Size, L, undefined, undefined).
120 | replicaSets(Pool,Size,L, Us, Pw) when is_atom(Pool), is_integer(Size) ->
121 | 	LT = [list_to_tuple(parse_addr(S)) || S <- L],
122 | 	% gen_server:cast(?MODULE,{conninfo,Pool,Size,{replicaSets,LT}}).
123 | 	gen_server:cast(?MODULE,{conninfo,Pool,Size,Us,Pw,LT}).
124 | 
125 | parse_addr(A) ->
126 | 	case string:tokens(A,":") of
127 | 		[IP,Port] ->
128 | 			[IP,Port];
129 | 		[IP] ->
130 | 			[IP, integer_to_list(?MONGO_PORT)]
131 | 	end.
132 | 
133 | datetime_to_now(Loctime) ->
134 | 	Secs = calendar:datetime_to_gregorian_seconds(Loctime) - 719528 * 24 * 60 * 60,
135 | 	{Secs div 1000000, Secs rem 1000000,0}.
136 | 
137 | ensureIndex(Pool,DB,Bin) ->
138 | 	gen_server:cast(?MODULE, {ensure_index,Pool, DB, Bin}).
139 | clearIndexCache() ->
140 | 	gen_server:cast(?MODULE, {clear_indexcache}).
141 | 
142 | exec_cursor(Pool,Col, Quer) ->
143 | 	case trysend(Pool,{find, self(), Col, Quer}, safe) of
144 | 		{ok,MonRef,Pid} ->
145 | 			receive
146 | 				{'DOWN', _MonitorRef, _, Pid, _Why} ->
147 | 					not_connected;
148 | 				{query_result,_Tm,_Tm1, _Src, <<_:32,CursorID:64/little, _From:32/little, _NDocs:32/little, Result/binary>>} ->
149 | 					erlang:demonitor(MonRef),
150 | 					% io:format("cursor ~p from ~p ndocs ~p, ressize ~p ~n", [_CursorID, _From, _NDocs, byte_size(Result)]),
151 | 					% io:format("~p~n", [Result]),
152 | 					case CursorID of
153 | 						0 ->
154 | 							{done, Result};
155 | 						_ ->
156 | 							PIDcl = spawn_link(fun() -> cursorcleanup(Pool) end),
157 | 							PIDcl ! {start, CursorID},
158 | 							{#cursor{id = CursorID, limit = Quer#search.ndocs, pid = PIDcl}, Result}
159 | 					end
160 | 			after 5000 ->
161 | 				erlang:demonitor(MonRef), not_connected
162 | 			end;
163 | 		X ->
164 | 			X
165 | 	end.
166 | exec_getmore(Pool,Col, C) ->
167 | 	case erlang:is_process_alive(C#cursor.pid) of
168 | 		false ->
169 | 			{done, <<>>};
170 | 		true ->
171 | 			case trysend(Pool,{getmore, self(), Col, C},safe) of
172 | 				{ok,MonRef,Pid} ->
173 | 					receive
174 | 						{'DOWN', _MonitorRef, _, Pid, _Why} ->
175 | 							not_connected;
176 | 						{query_result,_Tm,_Tm1, _Src, <<_:32,CursorID:64/little, _From:32/little, _NDocs:32/little, Result/binary>>} ->
177 | 							erlang:demonitor(MonRef),
178 | 							% io:format("cursor ~p from ~p ndocs ~p, ressize ~p ~n", [_CursorID, _From, _NDocs, byte_size(Result)]),
179 | 							% io:format("~p~n", [Result]),
180 | 							case CursorID of
181 | 								0 ->
182 | 									C#cursor.pid ! {stop},
183 | 									{done, Result};
184 | 								_ ->
185 | 									{ok, Result}
186 | 							end
187 | 					after 5000 ->
188 | 						erlang:demonitor(MonRef),
189 | 						{done, <<>>}
190 | 					end;
191 | 				X ->
192 | 					X
193 | 			end
194 | 	end.
195 | exec_delete(Pool,Collection, D) ->
196 | 	trysend(Pool,{delete,Collection,D},unsafe).
197 | 
198 | exec_find(Pool,Collection, Quer) ->
199 | 	TStart = msnow(),
200 | 	case trysend(Pool,{find, self(), Collection, Quer}, safe) of
201 | 		{ok,MonRef,Pid} ->
202 | 			TSent = msnow(),
203 | 			receive
204 | 				{'DOWN', _MonitorRef, _, Pid, _Why} ->
205 | 					ets:delete(Pool,Pid),
206 | 					timer:sleep(10),
207 | 					exec_find(Pool, Collection, Quer);
208 | 				{query_result,RecvTime,WireTime, Pid, <<_:32,CursorID:64/little, _From:32/little, _NDocs:32/little, Result/binary>>} ->
209 | 					TRes = msnow(),
210 | 					case CursorID of
211 | 						0 ->
212 | 							ok;
213 | 						_ ->
214 | 							Pid ! {killcursor, #killc{cur_ids = <<CursorID:64/little>>}}
215 | 					end,
216 | 					erlang:demonitor(MonRef),
217 | 					Times = #{send_time => TSent - TStart, resp_time => TRes - TSent, recv_time => RecvTime, con => Pid, wire_snd_delay => WireTime - TStart},
218 | 					{ok, Times, Result}
219 | 			after 200000 ->
220 | 				erlang:demonitor(MonRef),
221 | 				Pid ! {forget,self()},
222 | 				not_connected
223 | 			end;
224 | 		X ->
225 | 			X
226 | 	end.
227 | exec_insert(Pool,Collection, D) ->
228 | 	trysend(Pool,{insert,Collection,D}, unsafe).
229 | exec_update(Pool,Collection, D) ->
230 | 	trysend(Pool,{update,Collection,D}, unsafe).
231 | exec_cmd(Pool,DB, Cmd) ->
232 | 	TStart = erlang:monotonic_time(millisecond),
233 | 	CmdBin = bson:encode(Cmd),
234 | 	Quer = #search{ndocs = 1, nskip = 0, criteria = CmdBin},
235 | 	case exec_find(Pool,<<DB/binary, ".$cmd">>, Quer) of
236 | 		not_connected ->
237 | 			not_connected;
238 | 		<<>> ->
239 | 			[];
240 | 		{ok,Tracing, Result} ->
241 | 			Dec = bson:decode(Result),
242 | 			TEnd = erlang:monotonic_time(millisecond),
243 | 			Tracing1 = Tracing#{q => Cmd, q_size => iolist_size(CmdBin), bson_resp_bytes => iolist_size(Result)},
244 | 			mongoapi:report(TEnd - TStart, Tracing1),
245 | 			case Dec of
246 | 				[Res] ->
247 | 					Res;
248 | 				Res ->
249 | 					Res
250 | 			end
251 | 	end.
252 | 
253 | trysend(Pool,Query,Type) when is_atom(Pool) ->
254 | 	Sz = ets:info(Pool,size),
255 | 	case Sz of
256 | 		0 ->
257 | 			not_connected;
258 | 		_ ->
259 | 			case [Pid || {Pid, false} <- ets:tab2list(Pool)] of
260 | 				[_|_] = LP ->
261 | 					trysend(lists:nth(rand:uniform(length(LP)), LP), Query, Type);
262 | 				[] ->
263 | 					timer:sleep(10),
264 | 					trysend(Pool, Query, Type)
265 | 			end
266 | 	end;
267 | trysend(Pid,Query,safe) ->
268 | 	MonRef = erlang:monitor(process,Pid),
269 | 	Pid ! Query,
270 | 	{ok,MonRef,Pid};
271 | trysend(Pid,Query,unsafe) ->
272 | 	Pid ! Query,
273 | 	ok.
274 | 
275 | create_id() ->
276 | 	bson:dec2hex(<<>>, gen_server:call(?MODULE, {create_oid})).
277 | 
278 | startgfs(P) ->
279 | 	PID = spawn_link(fun() -> gfs_proc(P,<<>>) end),
280 | 	PID ! {start},
281 | 	PID.
282 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
283 | %
284 | % IMPLEMENTATION
285 | %
286 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
287 | -define(STATE_INIT, init).
288 | -define(STATE_ACTIVE, active).
289 | -define(STATE_CLOSING, closing).
290 | 
291 | % read = connection used for reading (find) from mongo server
292 | % write = connection used for writing (insert,update) to mongo server
293 | % single: same as replicaPairs (single server is always master and used for read and write)
294 | % masterSlave: read = write = master
295 | % replicaPairs: read = write = master
296 | % masterMaster: pick one at random
297 | % timer is reconnect timer if some connection is missing
298 | -record(pool, {name, size = 0, info, cb, style=default, us, pw}).
299 | % indexes is ensureIndex cache (an ets table).
300 | % pids: #{PID => {PoolName,[active|init|closing]}}
301 | % retry: [PoolName1,PoolName2]
302 | -record(mngd, {indexes, pools = [], pids = #{}, retry = [], hashed_hostn, oid_index = 1}).
303 | 
304 | handle_call({create_oid}, _, P) ->
305 | 	N = P#mngd.oid_index band 16#ffffff,
306 | 	T = erlang:monotonic_time(millisecond),
307 | 	R1 = rand:uniform(255),
308 | 	R2 = rand:uniform(255),
309 | 	R3 = rand:uniform(255),
310 | 	R4 = rand:uniform(255),
311 | 	R5 = rand:uniform(255),
312 | 	Out = <<T:32, R1, R2, R3, R4, R5, N:24>>,
313 | 	{reply, Out, P#mngd{oid_index = P#mngd.oid_index + 1}};
314 | % handle_call({is_connected,Name}, _, P) ->
315 | % 	case get(Name) of
316 | % 		X when is_pid(X#conn.pid) ->
317 | % 			{reply, true, P};
318 | % 		_X ->
319 | % 			{reply, false, P}
320 | % 	end;
321 | handle_call(stop, _, P) ->
322 | 	{stop, shutdown, stopped, P};
323 | handle_call(_, _, P) ->
324 | 	{reply, ok, P}.
325 | 
326 | handle_cast({ensure_index,Pool, DB, Bin}, P) ->
327 | 	case ets:lookup(P#mngd.indexes, {DB,Bin}) of
328 | 		[] ->
329 | 			spawn(fun() ->
330 | 				Mon = mongoapi:new(Pool,DB),
331 | 				case mongoapi:insert(<<"system.indexes">>, hd(bson:decode(Bin)), Mon) of
332 | 					ok ->
333 | 						gen_server:cast(?MODULE,{ensure_index_store, DB, Bin});
334 | 					{error,[Ok|_]} when Ok > 0 ->
335 | 						gen_server:cast(?MODULE,{ensure_index_store, DB, Bin});
336 | 					_E ->
337 | 						error_logger:error_msg("erlmongo ensure_index failed db=~p, err=~p~n", [DB,_E]),
338 | 						ok
339 | 				end
340 | 			end);
341 | 		_ ->
342 | 			true
343 | 	end,
344 | 	{noreply, P};
345 | handle_cast({ensure_index_store, DB,Bin}, P) ->
346 | 	ets:insert(P#mngd.indexes, {{DB,Bin}}),
347 | 	{noreply, P};
348 | handle_cast({clear_indexcache}, P) ->
349 | 	ets:delete_all_objects(P#mngd.indexes),
350 | 	{noreply, P};
351 | handle_cast({conninfo, Pool, Size, Us, Pw, Info}, P) ->
352 | 	?DBG("conninfo ~p", [{Pool,Size,Info}]),
353 | 	Pools = case lists:keyfind(Pool,#pool.name,P#mngd.pools) of
354 | 		false ->
355 | 			ets:new(Pool,[named_table,public,set]),
356 | 			[#pool{name = Pool, size = Size, info = Info, us = Us, pw = Pw}|P#mngd.pools];
357 | 		#pool{info = OldInfo} when OldInfo == Info ->
358 | 			P#mngd.pools;
359 | 		Existing ->
360 | 			lists:keystore(Pool, #pool.name, P#mngd.pools, Existing#pool{info = Info})
361 | 	end,
362 | 	handle_cast(save_connections,P#mngd{pools = Pools});
363 | handle_cast(save_connections,P) ->
364 | 	L = [{Pool#pool.name, Pool#pool.size, Pool#pool.info, Pool#pool.us, Pool#pool.pw} || Pool <- P#mngd.pools],
365 | 	application:set_env(erlmongo,connections,L),
366 | 	{noreply, P};
367 | handle_cast({start_connection, Pool,CB}, P) ->
368 | 	?DBG("start_connection ~p ~p~n", [Pool, P#mngd.pools]),
369 | 	case lists:keyfind(Pool, #pool.name, P#mngd.pools) of
370 | 		false ->
371 | 			{noreply, P};
372 | 		PI ->
373 | 			{noreply, start_connection(P, Pool, PI#pool{cb = CB})}
374 | 	end;
375 | handle_cast({delete_connection, Pool}, P) ->
376 | 	case lists:keymember(Pool,#pool.name, P#mngd.pools) of
377 | 		false ->
378 | 			{noreply, P};
379 | 		_ ->
380 | 			[element(1,ConObj) ! stop || ConObj <- ets:tab2list(Pool)],
381 | 			ets:delete(Pool),
382 | 			handle_cast(save_connections, P#mngd{pools = lists:keydelete(Pool,#pool.name,P#mngd.pools)})
383 | 	end;
384 | handle_cast({print_info}, P) ->
385 | 	io:format("~p ~p~n~p~n", [self(),get(),P]),
386 | 	{noreply, P};
387 | handle_cast(_, P) ->
388 | 	{noreply, P}.
389 | 
390 | startcon(Name, Addr, Port, Us, Pw) when is_list(Port) ->
391 | 	startcon(Name, Addr, list_to_integer(Port), Us, Pw);
392 | startcon(Name, Addr, Port, Us, Pw) ->
393 | 	{PID,_} = spawn_monitor(fun() -> connection_init(true) end),
394 | 	PID !
{start, Name, self(), Addr, Port, Us, Pw}, 395 | PID. 396 | 397 | start_connection(P, PoolName, PI) -> 398 | Servers = PI#pool.info, 399 | Existing = [PID || {PID,{Pool1,State}} <- maps:to_list(P#mngd.pids), 400 | Pool1 == PoolName andalso (State == ?STATE_ACTIVE orelse State == ?STATE_INIT)], 401 | ExLen = length(Existing), 402 | LenServers = length(Servers), 403 | case ok of 404 | _ when ExLen < PI#pool.size -> 405 | Additional = [begin 406 | {IP,Port} = lists:nth(rand:uniform(LenServers), Servers), 407 | PID = startcon(PoolName, IP, Port, PI#pool.us, PI#pool.pw), 408 | {PID, {PoolName,?STATE_INIT}} 409 | end || _ <- lists:seq(1,PI#pool.size - ExLen)], 410 | P#mngd{pids = maps:merge(P#mngd.pids, maps:from_list(Additional))}; 411 | _ when ExLen > PI#pool.size -> 412 | {_Keepers,Gonners} = lists:split(PI#pool.size, Existing), 413 | [begin 414 | PID ! stop, 415 | PID 416 | end || PID <- Gonners], 417 | New = maps:fold(fun(PID,_,Map) -> 418 | case lists:member(PID,Gonners) of 419 | true -> 420 | ets:delete(PoolName, PID), 421 | maps:put(PID,{PoolName,?STATE_CLOSING}, Map); 422 | false -> 423 | Map 424 | end 425 | end, P#mngd.pids,P#mngd.pids), 426 | P#mngd{pids = New}; 427 | _ -> 428 | P 429 | end. 430 | 431 | conn_callback(P) -> 432 | case is_pid(P) of 433 | true -> 434 | P ! {mongodb_connected}; 435 | false -> 436 | case P of 437 | {Mod,Fun,Param} -> 438 | spawn(Mod,Fun,Param); 439 | _ -> 440 | true 441 | end 442 | end. 443 | 444 | reconnect(#mngd{retry = [H|T]} = P) -> 445 | case lists:keyfind(H,#pool.name, P#mngd.pools) of 446 | false -> 447 | reconnect(P#mngd{retry = T}); 448 | PI -> 449 | reconnect(start_connection(P#mngd{retry = T}, H, PI)) 450 | end; 451 | reconnect(P) -> 452 | P. 453 | 454 | handle_info(reconnect, P) -> 455 | % case P#mngd.retry of 456 | % [_|_] -> 457 | % io:format("reconnect ~p~n",[P#mngd.retry]); 458 | % _ -> 459 | % ok 460 | % end, 461 | erlang:send_after(?RECONNECT_DELAY, self(),reconnect), 462 | {noreply, reconnect(P)}; 463 | handle_info({'DOWN', _MonitorRef, _, PID, auth_failed}, P) -> 464 | case maps:get(PID,P#mngd.pids, undefined) of 465 | {Pool,_} -> 466 | cleanup(Pool), 467 | {noreply, P#mngd{pids = maps:remove(PID,P#mngd.pids)}}; 468 | _ -> 469 | {noreply, P} 470 | end; 471 | handle_info({'DOWN', _MonitorRef, _, PID, Why}, P) -> 472 | ?DBG("conndied ~p ~p", [PID,maps:get(PID,P#mngd.pids, undefined)]), 473 | % io:format("condied ~p~n", [{PID,Why}]), 474 | case maps:get(PID,P#mngd.pids, undefined) of 475 | undefined -> 476 | {noreply, P}; 477 | {Pool,?STATE_INIT} -> 478 | cleanup(Pool), 479 | {noreply, P#mngd{pids = maps:remove(PID,P#mngd.pids), retry = add(Pool,P#mngd.retry)}}; 480 | {Pool,?STATE_CLOSING} -> 481 | cleanup(Pool), 482 | {noreply, P#mngd{pids = maps:remove(PID,P#mngd.pids)}}; 483 | {Pool,?STATE_ACTIVE} -> 484 | cleanup(Pool), 485 | case Why of 486 | normal -> 487 | ok; 488 | _ -> 489 | error_logger:error_msg("erlmongo connection died ~p reason=~p pool=~p~n", [PID,Why,Pool]) 490 | end, 491 | handle_cast({start_connection, Pool, undefined}, P#mngd{pids = maps:remove(PID,P#mngd.pids)}) 492 | end; 493 | handle_info({query_result,_Tm,_Tm1, Src, <<_:20/binary, Res/binary>>}, P) -> 494 | case maps:get(Src, P#mngd.pids, undefined) of 495 | {Pool,PidState} -> 496 | % io:format("got ismaster query_result ~p~n", [Pool]), 497 | case catch bson:decode(Res) of 498 | [Obj] -> 499 | case proplists:get_value(<<"ismaster">>,Obj) of 500 | Val when Val == true; Val == 1 -> 501 | ?DBG("foundmaster ~p~n", [Src]), 502 | case is_connected(Pool) of 503 | false -> 
504 | 									ets:insert(Pool,{Src, false}),
505 | 									PI = lists:keyfind(Pool,#pool.name, P#mngd.pools),
506 | 									conn_callback(PI#pool.cb);
507 | 								true when PidState == ?STATE_INIT ->
508 | 									ets:insert(Pool,{Src, false});
509 | 								true ->
510 | 									ok
511 | 							end,
512 | 							case PidState of
513 | 								?STATE_INIT ->
514 | 									{noreply, P#mngd{pids = maps:put(Src,{Pool,active}, P#mngd.pids)}};
515 | 								?STATE_ACTIVE ->
516 | 									{noreply, P};
517 | 								_ ->
518 | 									{noreply, P}
519 | 							end;
520 | 						_ ->
521 | 							Src ! {stop},
522 | 							case proplists:get_value(<<"primary">>,Obj) of
523 | 								undefined ->
524 | 									{noreply, P#mngd{pids = maps:remove(Src,P#mngd.pids), retry = add(Pool,P#mngd.retry)}};
525 | 								Prim ->
526 | 									?DBG("Connecting to primary ~p", [Prim]),
527 | 									[Addr,Port] = string:tokens(binary_to_list(Prim),":"),
528 | 									PI = lists:keyfind(Pool,#pool.name, P#mngd.pools),
529 | 									PID = startcon(Pool, Addr,Port, PI#pool.us, PI#pool.pw),
530 | 									{noreply, P#mngd{pids = maps:remove(Src,maps:put(PID,{Pool,init},P#mngd.pids))}}
531 | 							end
532 | 					end;
533 | 				_ ->
534 | 					Src ! {stop},
535 | 					{noreply, P#mngd{pids = maps:remove(Src,P#mngd.pids), retry = add(Pool,P#mngd.retry)}}
536 | 			end;
537 | 		undefined ->
538 | 			Src ! {stop},
539 | 			{noreply, P}
540 | 	end;
541 | handle_info({query_result,_Tm,_Tm1, Src, _}, P) ->
542 | 	Src ! {stop},
543 | 	{Pool,?STATE_INIT} = maps:get(Src, P#mngd.pids),
544 | 	error_logger:error_msg("erlmongo ismaster invalid response pool=~p", [Pool]),
545 | 	{noreply, P#mngd{pids = maps:remove(Src,P#mngd.pids), retry = add(Pool,P#mngd.retry)}};
546 | handle_info(stop,_P) ->
547 | 	exit(stop);
548 | handle_info(_X, P) ->
549 | 	io:format("~p~n", [_X]),
550 | 	{noreply, P}.
551 | cleanup(Pool) ->
552 | 	[case erlang:is_process_alive(element(1,Pid)) of
553 | 		false ->
554 | 			ets:delete(Pool,element(1,Pid));
555 | 		true ->
556 | 			ok
557 | 	end || Pid <- ets:tab2list(Pool)].
558 | 
559 | add(K,L) ->
560 | 	case lists:member(K,L) of
561 | 		true ->
562 | 			L;
563 | 		false ->
564 | 			[K|L]
565 | 	end.
566 | 
567 | % conndied(Name,PID,P) when P#conn.pid == PID ->
568 | % 	put(Name, P#conn{pid = undefined, timer = timer(P#conn.timer, Name)});
569 | % conndied(_,_,_) ->
570 | % 	ok.
571 | 
572 | terminate(_, _) ->
573 | 	ok.
574 | code_change(_, P, _) ->
575 | 	{ok, P}.
576 | init([]) ->
577 | 	erlang:send_after(?RECONNECT_DELAY, self(),reconnect),
578 | 	case application:get_env(erlmongo,connections) of
579 | 		{ok, L} ->
580 | 			[gen_server:cast(?MODULE,{conninfo, Pool, Sz, Us, Pw, Info}) || {Pool,Sz,Info,Us,Pw} <- L],
581 | 			[connect(Pool) || {Pool,_,_,_,_} <- L];
582 | 		_ ->
583 | 			true
584 | 	end,
585 | 	{ok, HN} = inet:gethostname(),
586 | 	<<HashedHN:3/binary,_/binary>> = erlang:md5(HN),
587 | 	{ok, #mngd{indexes = ets:new(mongoIndexes, [set, private]), hashed_hostn = HashedHN, oid_index = rand:uniform(16#ffffff)}}.
588 | 
589 | set_ssl(V)->
590 | 	set_ssl(V, []).
591 | set_ssl(false, _Opts)->
592 | 	application:set_env(erlmongo, ssl, false);
593 | set_ssl(true, Opts)->
594 | 	application:set_env(erlmongo, ssl, true),
595 | 	application:set_env(erlmongo, ssl_opts, Opts).
596 | is_ssl()->
597 | 	application:get_env(erlmongo, ssl, false). % defaults to false
598 | ssl_opts()->
599 | 	application:get_env(erlmongo, ssl_opts, []).
600 | 
601 | gfs_proc(#gfs_state{mode = write} = P, Buf) ->
602 | 	receive
603 | 		{write, Bin} ->
604 | 			Compl = <<Buf/binary, Bin/binary>>,
605 | 			case true of
606 | 				_ when byte_size(Compl) >= P#gfs_state.flush_limit ->
607 | 					self() ! {flush};
608 | 				_ ->
609 | 					true
610 | 			end,
611 | 			gfs_proc(P, Compl);
612 | 		{flush} ->
613 | 			FlSize = (byte_size(Buf) div (P#gfs_state.file)#gfs_file.chunkSize) * (P#gfs_state.file)#gfs_file.chunkSize,
614 | 			<<FlushBin:FlSize/binary, Rem/binary>> = Buf,
615 | 			gfs_proc(gfsflush(P, FlushBin, <<>>),Rem);
616 | 		{close} ->
617 | 			gfsflush(P#gfs_state{closed = true}, Buf, <<>>);
618 | 		{'EXIT',_,_} ->
619 | 			self() ! {close},
620 | 			gfs_proc(P,Buf);
621 | 		{start} ->
622 | 			process_flag(trap_exit,true),
623 | 			FileID = (P#gfs_state.file)#gfs_file.docid,
624 | 			exec_update(P#gfs_state.pool,<<(P#gfs_state.collection)/binary, ".files">>, #update{selector = bson:encode([{<<"_id">>, {oid, FileID}}]),
625 | 				document = bson:encoderec(P#gfs_state.file)}),
626 | 			Keys = [{<<"files_id">>, 1},{<<"n">>,1}],
627 | 			Bin = bson:encode([{plaintext, <<"name">>, bson:gen_prop_keyname(Keys, <<>>)},
628 | 				{plaintext, <<"ns">>, <<(P#gfs_state.collection)/binary, ".chunks">>},
629 | 				{<<"key">>, {bson, bson:encode(Keys)}}]),
630 | 			ensureIndex(P#gfs_state.pool,P#gfs_state.db, Bin),
631 | 			gfs_proc(P,<<>>)
632 | 		% X ->
633 | 		% 	io:format("Received unknown msg ~p~n", [X])
634 | 	end;
635 | gfs_proc(#gfs_state{mode = read} = P, Buf) ->
636 | 	receive
637 | 		{read, Source, RecN} ->
638 | 			CSize = (P#gfs_state.file)#gfs_file.chunkSize,
639 | 			FileLen = (P#gfs_state.file)#gfs_file.length,
640 | 			case FileLen - CSize * P#gfs_state.nchunk of
641 | 				LenRem when LenRem >= RecN ->
642 | 					N = RecN;
643 | 				LenRem when LenRem > 0 ->
644 | 					N = LenRem;
645 | 				_ ->
646 | 					N = byte_size(Buf)
647 | 			end,
648 | 			case true of
649 | 				_ when N =< byte_size(Buf) ->
650 | 					<<Ret:N/binary, Rem/binary>> = Buf,
651 | 					Source ! {gfs_bytes, Ret},
652 | 					gfs_proc(P, Rem);
653 | 				_ ->
654 | 					GetChunks = ((N - byte_size(Buf)) div CSize) + 1,
655 | 					Quer = #search{ndocs = GetChunks, nskip = 0,
656 | 						criteria = bson:encode([{<<"files_id">>, (P#gfs_state.file)#gfs_file.docid},
657 | 							{<<"n">>, {in,{gte, P#gfs_state.nchunk},{lte, P#gfs_state.nchunk + GetChunks}}}]),
658 | 						field_selector = get(field_selector)},
659 | 					case exec_find(P#gfs_state.pool,<<(P#gfs_state.collection)/binary, ".chunks">>, Quer) of
660 | 						not_connected ->
661 | 							Source ! not_connected,
662 | 							gfs_proc(P,Buf);
663 | 						{ok,_Tracing,<<>>} ->
664 | 							Source ! eof,
665 | 							gfs_proc(P,Buf);
666 | 						{ok,_Tracing,ResBin} ->
667 | 							% io:format("Result ~p~n", [ResBin]),
668 | 							Result = chunk2bin(bson:decode(ResBin), <<>>),
669 | 							case true of
670 | 								_ when byte_size(Result) + byte_size(Buf) =< N ->
671 | 									Rem = <<>>,
672 | 									Source ! {gfs_bytes, <<Buf/binary, Result/binary>>};
673 | 								_ ->
674 | 									<<ReplyBin:N/binary, Rem/binary>> = <<Buf/binary, Result/binary>>,
675 | 									Source ! {gfs_bytes, ReplyBin}
676 | 							end,
677 | 							gfs_proc(P#gfs_state{nchunk = P#gfs_state.nchunk + GetChunks}, Rem)
678 | 					end
679 | 			end;
680 | 		{close} ->
681 | 			true;
682 | 		{start} ->
683 | 			put(field_selector, bson:encode([{<<"data">>, 1}])),
684 | 			gfs_proc(P, <<>>)
685 | 	end.
686 | 
687 | chunk2bin([[_, {_, {binary, 2, Chunk}}]|T], Bin) ->
688 | 	chunk2bin(T, <<Bin/binary, Chunk/binary>>);
689 | chunk2bin(_, B) ->
690 | 	B.
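% gfsflush/3 below slices the buffer into chunkSize pieces, encodes each piece as a
% #gfs_chunk document, batches them into a single .chunks insert and, once the file is
% closed, asks the server for filemd5 before updating the .files document.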
691 | 
692 | 
693 | gfsflush(P, Bin, Out) ->
694 | 	CSize = (P#gfs_state.file)#gfs_file.chunkSize,
695 | 	FileID = (P#gfs_state.file)#gfs_file.docid,
696 | 	case Bin of
697 | 		<<ChunkBin:CSize/binary, Rem/binary>> ->
698 | 			Chunk = #gfs_chunk{docid = {oid,create_id()}, files_id = FileID, n = P#gfs_state.nchunk, data = {binary, 2, ChunkBin}},
699 | 			gfsflush(P#gfs_state{nchunk = P#gfs_state.nchunk + 1, length = P#gfs_state.length + CSize},
700 | 				Rem, <<Out/binary, (bson:encoderec(Chunk))/binary>>);
701 | 		Rem when P#gfs_state.closed == true, byte_size(Rem) > 0 ->
702 | 			Chunk = #gfs_chunk{docid = {oid,create_id()}, files_id = FileID, n = P#gfs_state.nchunk, data = {binary, 2, Rem}},
703 | 			gfsflush(P#gfs_state{length = P#gfs_state.length + byte_size(Rem)},
704 | 				<<>>, <<Out/binary, (bson:encoderec(Chunk))/binary>>);
705 | 		Rem when byte_size(Out) > 0 ->
706 | 			File = P#gfs_state.file,
707 | 			exec_insert(P#gfs_state.pool,<<(P#gfs_state.collection)/binary, ".chunks">>, #insert{documents = Out}),
708 | 			case P#gfs_state.closed of
709 | 				true ->
710 | 					MD5Cmd = exec_cmd(P#gfs_state.pool,P#gfs_state.db, [{<<"filemd5">>, FileID},{<<"root">>, P#gfs_state.coll_name}]),
711 | 					case proplists:get_value(<<"md5">>,MD5Cmd) of
712 | 						undefined ->
713 | 							error_logger:error_msg("Md5 cmd failed ~p", [MD5Cmd]),
714 | 							MD5 = undefined,
715 | 							ok;
716 | 						MD5 ->
717 | 							ok
718 | 					end;
719 | 				false ->
720 | 					MD5 = undefined
721 | 			end,
722 | 			exec_update(P#gfs_state.pool,<<(P#gfs_state.collection)/binary, ".files">>, #update{selector = bson:encode([{<<"_id">>, FileID}]),
723 | 				document = bson:encoderec(File#gfs_file{length = P#gfs_state.length,
724 | 					md5 = MD5})}),
725 | 			gfsflush(P, Rem, <<>>);
726 | 		_Rem ->
727 | 			P
728 | 	end.
729 | 
730 | -record(ccd, {conn,cursor = 0}).
731 | % Just for cleanup
732 | cursorcleanup(P) ->
733 | 	receive
734 | 		{stop} ->
735 | 			true;
736 | 		{cleanup} ->
737 | 			P#ccd.conn ! {killcursor, #killc{cur_ids = <<(P#ccd.cursor):64/little>>}};
738 | 		{'EXIT', _PID, _Why} ->
739 | 			self() ! {cleanup},
740 | 			cursorcleanup(P);
741 | 		{start, Cursor} ->
742 | 			process_flag(trap_exit, true),
743 | 			cursorcleanup(#ccd{conn = P,cursor = Cursor})
744 | 	end.
745 | 
746 | msnow() ->
747 | 	erlang:monotonic_time(millisecond).
748 | 
749 | -record(con, {pool,sock, die = false, die_attempt_cnt = 0, auth}).
750 | -record(auth, {step = 0, us, pw, source, nonce, first_msg, sig, conv_id}).
751 | con_candie() ->
752 | 	[Pid || {Ind,Pid} <- get(), is_integer(Ind) andalso is_pid(Pid)] == [].
753 | % Process dictionary holds one entry per in-flight request:
754 | %  {ReqID, ReplyPID}
755 | % Waiting for request
756 | connection_init(_) ->
757 | 	connection(#con{},1,<<>>).
758 | connection(#con{} = P,Index,Buf) ->
759 | 	receive
760 | 		{forget, Source} ->
761 | 			case lists:keyfind(Source,2,get()) of
762 | 				false ->
763 | 					ok;
764 | 				{Id,_} ->
765 | 					erase({Id,time}),
766 | 					erase(Id)
767 | 			end,
768 | 			connection(P,Index,Buf);
769 | 		{find, Source, Collection, Query} ->
770 | 			case P#con.die of
771 | 				true ->
772 | 					ok;
773 | 				_ ->
774 | 					ets:insert(P#con.pool,{self(),true})
775 | 			end,
776 | 			case catch constr_query(Query,Index, Collection) of
777 | 				{'EXIT',_} ->
778 | 					Source ! {query_result, 0,0, self(), badquery},
779 | 					error_logger:error_msg("Invalid query ~p ~p ~p",[Source, Collection, Query]),
780 | 					connection(P, Index+1, Buf);
781 | 				QBin ->
782 | 					ok = do_send(P#con.sock, QBin),
783 | 					put(Index,Source),
784 | 					put({Index,time}, msnow()),
785 | 					connection(P, Index+1, Buf)
786 | 			end;
787 | 		{insert, Collection, Doc} ->
788 | 			Bin = constr_insert(Doc, Collection),
789 | 			ok = do_send(P#con.sock, Bin),
790 | 			connection(P, Index,Buf);
791 | 		{update, Collection, #update{} = Doc} ->
792 | 			Bin = constr_update(Doc, Collection),
793 | 			ok = do_send(P#con.sock, Bin),
794 | 			connection(P,Index, Buf);
795 | 		{update, Collection, [_|_] = Doc} ->
796 | 			Bin = lists:foldl(fun(D,B) -> [B,(constr_update(D, Collection))] end, [],Doc),
797 | 			ok = do_send(P#con.sock, Bin),
798 | 			connection(P,Index, Buf);
799 | 		{delete, Col, D} ->
800 | 			Bin = constr_delete(D, Col),
801 | 			ok = do_send(P#con.sock, Bin),
802 | 			connection(P,Index, Buf);
803 | 		{getmore, Source, Col, C} ->
804 | 			Bin = constr_getmore(C, Index, Col),
805 | 			ok = do_send(P#con.sock, Bin),
806 | 			put(Index,Source),
807 | 			connection(P,Index+1, Buf);
808 | 		{killcursor, C} ->
809 | 			Bin = constr_killcursors(C),
810 | 			ok = do_send(P#con.sock, Bin),
811 | 			connection(P,Index, Buf);
812 | 		{tcp, _, Bin} ->
813 | 			% io:format("~p~n", [{byte_size(Bin), Buf}]),
814 | 			connection(P,Index,readpacket(P#con.pool,P#con.die,<<Buf/binary, Bin/binary>>));
815 | 		{ssl, _, Bin} ->
816 | 			% io:format("~p~n", [{byte_size(Bin), Buf}]),
817 | 			connection(P,Index,readpacket(P#con.pool,P#con.die,<<Buf/binary, Bin/binary>>));
818 | 		{ping} ->
819 | 			case P#con.die of
820 | 				true ->
821 | 					ok;
822 | 				_ ->
823 | 					erlang:send_after(1000,self(),{ping}),
824 | 					self() ! {find, whereis(?MODULE), <<"admin.$cmd">>,
825 | 						#search{nskip = 0, ndocs = 1, criteria = bson:encode([{<<"ismaster">>, 1}])}}
826 | 			end,
827 | 			connection(P, Index, Buf);
828 | 			% Collection = <<"admin.$cmd">>,
829 | 			% Query = #search{nskip = 0, ndocs = 1, criteria = bson:encode([{<<"ping">>, 1}])},
830 | 			% QBin = constr_query(Query,Index, Collection),
831 | 			% ok = do_send(P#con.sock, QBin),
832 | 			% connection(P,Index+1,Buf);
833 | 		{stop} ->
834 | 			ets:delete(P#con.pool,self()),
835 | 			case con_candie() of
836 | 				true ->
837 | 					ok;
838 | 				_ ->
839 | 					connection(P#con{die = true}, Index, Buf)
840 | 			end;
841 | 		{start, Pool, Source, IP, Port, Us, Pw} ->
842 | 			{ok, Sock} = do_connect(IP, Port, 1000),
843 | 			erlang:send_after(1000,self(),{ping}),
844 | 			erlang:send_after(40000 + rand:uniform(20000),self(),{stop}),
845 | 			connection(#con{pool = Pool, sock = Sock, auth = init_auth(Source, Us, Pw)},1, <<>>);
846 | 		{query_result,_Tm,_Tm1, _Me, <<_:32,_CursorID:64/little, _From:32/little, _NDocs:32/little, Packet/binary>>} ->
847 | 			case (catch scram_step(P#con.auth, Packet)) of
848 | 				{'EXIT', Err} ->
849 | 					case code:is_loaded(logger) of
850 | 						false ->
851 | 							error_logger:error_msg("Authentication failed ~p", [Err]);
852 | 						_ ->
853 | 							logger:error("Authentication failed ~p", [Err])
854 | 					end,
855 | 					exit(auth_failed);
856 | 				R ->
857 | 					connection(P#con{auth = R},Index, Buf)
858 | 			end;
859 | 		{tcp_closed, _} ->
860 | 			exit(tcp_closed);
861 | 		{ssl_closed, _} ->
862 | 			exit(ssl_closed)
863 | 	after 2000 ->
864 | 		case P#con.die of
865 | 			true ->
866 | 				case con_candie() of
867 | 					true ->
868 | 						ok;
869 | 					_ when P#con.die_attempt_cnt > 2 ->
870 | 						ok;
871 | 					_ ->
872 | 						connection(P#con{die_attempt_cnt = P#con.die_attempt_cnt + 1}, Index, Buf)
873 | 				end;
874 | 			false ->
875 | 				connection(P, Index, Buf)
876 | 		end
877 | 	end.
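% SSL is opt-in through the application env (sketch; the options are illustrative):
%   mongodb:set_ssl(true, [{verify, verify_none}])
% do_connect/3 below picks the setting up via is_ssl()/ssl_opts() for every new connection.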
878 | 
879 | % SSL/GEN_TCP connect switch
880 | do_connect(Host, Port, Timeout)->
881 | 	do_connect(Host, Port, Timeout, is_ssl(), ssl_opts()).
882 | do_connect(Host, Port, Timeout, true, Opts) ->
883 | 	{ok, _} = application:ensure_all_started(ssl),
884 | 	ssl:connect(Host, Port, [binary, {active, true}, {packet, raw}, {nodelay,true}] ++ Opts, Timeout);
885 | do_connect(Host, Port, Timeout, false, _) ->
886 | 	gen_tcp:connect(Host, Port, [binary, {active, true}, {packet, raw}, {nodelay,true}], Timeout).
887 | 
888 | do_send(Sock, Packet) ->
889 | 	do_send(Sock, Packet, is_ssl()).
890 | do_send(Sock, Packet, true)->
891 | 	ssl:send(Sock, Packet);
892 | do_send(Sock, Packet, false)->
893 | 	gen_tcp:send(Sock, Packet).
894 | 
895 | init_auth(Source, undefined,undefined) ->
896 | 	self() ! {find, Source, <<"admin.$cmd">>,
897 | 		#search{nskip = 0, ndocs = 1, criteria = bson:encode([{<<"ismaster">>, 1}])}},
898 | 	undefined;
899 | init_auth(Source, Us,Pw) ->
900 | 	scram_first_step_start(#auth{us = Us, pw = Pw, source = Source}).
901 | 
902 | readpacket(Pool,Die,<<ComplSize:32/little, _ReqID:32/little, RespID:32/little, _OpCode:32/little, Body/binary>> = Bin) ->
903 | 	BodySize = ComplSize-16,
904 | 	case Body of
905 | 		<<Packet:BodySize/binary, Rem/binary>> ->
906 | 			Time = erlang:monotonic_time(millisecond),
907 | 			case get(building) of
908 | 				undefined ->
909 | 					Start = Time;
910 | 				Start ->
911 | 					ok
912 | 			end,
913 | 			erase(building),
914 | 			case is_pid(get(RespID)) of
915 | 				true ->
916 | 					WireSendTime = get({RespID,time}),
917 | 					get(RespID) ! {query_result, Time - Start, WireSendTime, self(), Packet},
918 | 					erase(RespID),
919 | 					erase({RespID,time}),
920 | 					case Pool of
921 | 						undefined ->
922 | 							ok;
923 | 						_ when Die ->
924 | 							ok;
925 | 						_ ->
926 | 							ets:insert(Pool,{self(),not con_candie()})
927 | 					end;
928 | 				false ->
929 | 					true
930 | 			end,
931 | 			case Rem of
932 | 				<<>> ->
933 | 					<<>>;
934 | 				_ ->
935 | 					readpacket(Pool,Die,Rem)
936 | 			end;
937 | 		_ ->
938 | 			case get(building) of
939 | 				undefined ->
940 | 					put(building,erlang:monotonic_time(millisecond));
941 | 				_ ->
942 | 					ok
943 | 			end,
944 | 			Bin
945 | 	end;
946 | readpacket(_Pool,_Die,Bin) ->
947 | 	Bin.
948 | 
949 | % Wire message header: <<TotalLen:32/little, RequestID:32/little, ResponseTo:32/little, OpCode:32/little>>.
950 | constr_header(Len, ID, RespTo, OP) ->
951 | 	<<(Len+16):32/little, ID:32/little, RespTo:32/little, OP:32/little>>.
952 | 
953 | constr_update(U, Name) ->
954 | 	Update = <<0:32, Name/binary, 0:8, (U#update.upsert):32/little>>,
955 | 	Sz = byte_size(Update) + byte_size(U#update.selector) + byte_size(U#update.document),
956 | 	Header = constr_header(Sz, 0, 0, ?OP_UPDATE),
957 | 	[Header, Update, U#update.selector, U#update.document].
958 | 
959 | constr_insert(U, Name) ->
960 | 	Insert = <<0:32, Name/binary, 0:8>>,
961 | 	Header = constr_header(byte_size(Insert)+byte_size(U#insert.documents), 0, 0, ?OP_INSERT),
962 | 	[Header, Insert, U#insert.documents].
963 | 
964 | constr_query(U, Index, Name) ->
965 | 	QueryBody = [U#search.criteria, U#search.field_selector],
966 | 	Query = <<(U#search.opts):32/little, Name/binary, 0:8, (U#search.nskip):32/little, (U#search.ndocs):32/little>>,
967 | 	Header = constr_header(byte_size(Query)+iolist_size(QueryBody), Index, 0, ?OP_QUERY),
968 | 	[Header,Query,QueryBody].
969 | 
970 | constr_getmore(U, Index, Name) ->
971 | 	GetMore = <<0:32, Name/binary, 0:8, (U#cursor.limit):32/little, (U#cursor.id):64/little>>,
972 | 	Header = constr_header(byte_size(GetMore), Index, 0, ?OP_GET_MORE),
973 | 	[Header, GetMore].
974 | 
975 | constr_delete(U, Name) ->
976 | 	Delete = <<0:32, Name/binary, 0:8, 0:32, (U#delete.selector)/binary>>,
977 | 	Header = constr_header(byte_size(Delete), 0, 0, ?OP_DELETE),
978 | 	[Header, Delete].
979 | 
980 | constr_killcursors(U) ->
981 | 	Kill = <<0:32, (byte_size(U#killc.cur_ids) div 8):32/little, (U#killc.cur_ids)/binary>>,
982 | 	Header = constr_header(byte_size(Kill), 0, 0, ?OP_KILL_CURSORS),
983 | 	[Header, Kill].
984 | 
985 | % Taken and modified from
986 | % https://github.com/comtihon/mongodb-erlang/blob/master/src/connection/mc_auth_logic.erl
987 | 
988 | %% @private
989 | scram_first_step_start(P) ->
990 | 	RandomBString = base64:encode(crypto:strong_rand_bytes(32)),
991 | 	FirstMessage = compose_first_message(P#auth.us, RandomBString),
992 | 	Message = <<"n,,", FirstMessage/binary>>,
993 | 	Doc = [{<<"saslStart">>, 1},
994 | 		{<<"mechanism">>, <<"SCRAM-SHA-1">>},
995 | 		{<<"payload">>, {binary, Message}},
996 | 		{<<"autoAuthorize">>, 1}],
997 | 	self() ! {find, self(), <<"admin.$cmd">>,
998 | 		#search{nskip = 0, ndocs = 1, criteria = bson:encode(Doc)}},
999 | 	P#auth{nonce = RandomBString, first_msg = FirstMessage, step = 1}.
1000 | 
1001 | scram_step(#auth{step = 1} = P, Res1) ->
1002 | 	% {true, Res} = mc_worker_api:sync_command(Socket, <<"admin">>,
1003 | 	% 	{<<"saslStart">>, 1, <<"mechanism">>, <<"SCRAM-SHA-1">>, <<"payload">>, {bin, bin, Message}, <<"autoAuthorize">>, 1}, SetOpts),
1004 | 	[Res] = bson:decode(map, Res1),
1005 | 	ConversationId = maps:get(<<"conversationId">>, Res, {}),
1006 | 	Payload = maps:get(<<"payload">>, Res),
1007 | 	scram_second_step_start(P, Payload, ConversationId);
1008 | scram_step(#auth{step = 2} = P, Res1) ->
1009 | 	[Res] = bson:decode(map, Res1),
1010 | 	scram_third_step_start(P, base64:encode(P#auth.sig), Res);
1011 | scram_step(#auth{step = 4} = P, Res1) ->
1012 | 	[#{<<"done">> := true}] = bson:decode(map, Res1),
1013 | 	init_auth(P#auth.source, undefined, undefined).
1014 | 
1015 | 
1016 | %% @private
1017 | scram_second_step_start(P, {binary, _, Decoded} = _Payload, ConversationId) ->
1018 | 	{Signature, ClientFinalMessage} = compose_second_message(Decoded, P#auth.us, P#auth.pw, P#auth.nonce, P#auth.first_msg),
1019 | 	% {true, Res} = mc_worker_api:sync_command(Socket, <<"admin">>, {<<"saslContinue">>, 1, <<"conversationId">>, ConversationId,
1020 | 	% 	<<"payload">>, {bin, bin, ClientFinalMessage}}, SetOpts),
1021 | 	Doc = [{<<"saslContinue">>, 1},
1022 | 		{<<"conversationId">>, ConversationId},
1023 | 		{<<"payload">>, {binary, ClientFinalMessage}}],
1024 | 	self() ! {find, self(), <<"admin.$cmd">>,
1025 | 		#search{nskip = 0, ndocs = 1, criteria = bson:encode(Doc)}},
1026 | 	P#auth{sig = Signature, step = 2, conv_id = ConversationId}.
1027 | 
1028 | %% @private
1029 | scram_third_step_start(P, ServerSignature, Response) ->
1030 | 	{binary, _, Payload} = maps:get(<<"payload">>, Response),
1031 | 	Done = maps:get(<<"done">>, Response, false),
1032 | 	ParamList = parse_server_responce(Payload),
1033 | 	{_,ServerSignature} = lists:keyfind(<<"v">>,1, ParamList),
1034 | 	scram_forth_step_start(P, Done).
1035 | 
1036 | %% @private
1037 | scram_forth_step_start(P, true) -> init_auth(P#auth.source, undefined, undefined);
1038 | scram_forth_step_start(P, false) ->
1039 | 	Doc = [{<<"saslContinue">>, 1},
1040 | 		{<<"conversationId">>, P#auth.conv_id},
1041 | 		{<<"payload">>, {binary, <<>>}}],
1042 | 	self() ! {find, self(), <<"admin.$cmd">>,
1043 | 		#search{nskip = 0, ndocs = 1, criteria = bson:encode(Doc)}},
1044 | 	P#auth{step = 4}.
1045 | 
1046 | %% @private
1047 | compose_first_message(Login, RandomBString) ->
1048 | 	UserName = <<<<"n=">>/binary, (encode_name(Login))/binary>>,
1049 | 	Nonce = <<<<"r=">>/binary, RandomBString/binary>>,
1050 | 	<<UserName/binary, <<",">>/binary, Nonce/binary>>.
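% The client-first payload assembled above has the RFC 5802 shape:
%   "n,," ++ "n=<encoded username>,r=<base64 nonce>"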
1051 | 
1052 | encode_name(Name) ->
1053 | 	Comma = re:replace(Name, <<"=">>, <<"=3D">>, [{return, binary}]),
1054 | 	re:replace(Comma, <<",">>, <<"=2C">>, [{return, binary}]).
1055 | 
1056 | %% @private
1057 | compose_second_message(Payload, Login, Password, RandomBString, FirstMessage) ->
1058 | 	ParamList = parse_server_responce(Payload),
1059 | 	{_,R} = lists:keyfind(<<"r">>,1, ParamList),
1060 | 	Nonce = <<"r=", R/binary>>,
1061 | 	RandSz = byte_size(RandomBString),
1062 | 	<<RandomBString:RandSz/binary, _/binary>> = R,
1063 | 	{_,S} = lists:keyfind(<<"s">>,1, ParamList),
1064 | 	I = binary_to_integer(element(2,lists:keyfind(<<"i">>,1, ParamList))),
1065 | 	SaltedPassword = pbkdf2(pw_hash(Login, Password), base64:decode(S), I, 20),
1066 | 	ChannelBinding = <<"c=", (base64:encode(<<"n,,">>))/binary>>,
1067 | 	ClientFinalMessageWithoutProof = <<ChannelBinding/binary, <<",">>/binary, Nonce/binary>>,
1068 | 	AuthMessage = <<FirstMessage/binary, <<",">>/binary, Payload/binary, <<",">>/binary, ClientFinalMessageWithoutProof/binary>>,
1069 | 	ServerSignature = generate_sig(SaltedPassword, AuthMessage),
1070 | 	Proof = generate_proof(SaltedPassword, AuthMessage),
1071 | 	{ServerSignature, <<ClientFinalMessageWithoutProof/binary, <<",">>/binary, Proof/binary>>}.
1072 | 
1073 | pw_hash(Username, Password) ->
1074 | 	bson:dec2hex(<<>>, crypto:hash(md5, [Username, <<":mongo:">>, Password])).
1075 | 
1076 | %% @private
1077 | generate_proof(SaltedPassword, AuthMessage) ->
1078 | 	ClientKey = crypto:hmac(sha, SaltedPassword, <<"Client Key">>),
1079 | 	StoredKey = crypto:hash(sha, ClientKey),
1080 | 	Signature = crypto:hmac(sha, StoredKey, AuthMessage),
1081 | 	ClientProof = xorKeys(ClientKey, Signature, <<>>),
1082 | 	<<"p=", (base64:encode(ClientProof))/binary>>.
1083 | 
1084 | %% @private
1085 | generate_sig(SaltedPassword, AuthMessage) ->
1086 | 	ServerKey = crypto:hmac(sha, SaltedPassword, "Server Key"),
1087 | 	crypto:hmac(sha, ServerKey, AuthMessage).
1088 | 
1089 | %% @private
1090 | xorKeys(<<>>, _, Res) -> Res;
1091 | xorKeys(<<FA, RestA/binary>>, <<FB, RestB/binary>>, Res) ->
1092 | 	xorKeys(RestA, RestB, <<Res/binary, <<(FA bxor FB)>>/binary>>).
1093 | 
1094 | %% @private
1095 | parse_server_responce(Responce) ->
1096 | 	ParamList = binary:split(Responce, <<",">>, [global]),
1097 | 	lists:map(
1098 | 		fun(Param) ->
1099 | 			[K, V] = binary:split(Param, <<"=">>),
1100 | 			{K, V}
1101 | 		end, ParamList).
1102 | 
1103 | pbkdf2(Password, Salt, Iterations, DerivedLength) ->
1104 | 	pbkdf2(Password, Salt, Iterations, DerivedLength, 1, []).
1105 | pbkdf2(Password, Salt, Iterations, DerivedLength, BlockIndex, Acc) ->
1106 | 	case iolist_size(Acc) > DerivedLength of
1107 | 		true ->
1108 | 			<<Bin:DerivedLength/binary, _/binary>> = iolist_to_binary(lists:reverse(Acc)),
1109 | 			Bin;
1110 | 		false ->
1111 | 			Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
1112 | 			pbkdf2(Password, Salt, Iterations, DerivedLength, BlockIndex + 1, [Block | Acc])
1113 | 	end.
1114 | pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) when Iteration > Iterations ->
1115 | 	Acc;
1116 | pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
1117 | 	InitialBlock = crypto:hmac(sha,Password, <<Salt/binary, BlockIndex:32/integer>>),
1118 | 	pbkdf2(Password, Salt, Iterations, BlockIndex, 2, InitialBlock, InitialBlock);
1119 | pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
1120 | 	Next = crypto:hmac(sha,Password, Prev),
1121 | 	pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1, Next, crypto:exor(Next, Acc)).
--------------------------------------------------------------------------------
/src/mongodb_supervisor.erl:
--------------------------------------------------------------------------------
1 | -module(mongodb_supervisor).
2 | -behavior(supervisor).
3 | -export([start_link/0, init/1]).
4 | 
5 | start_link() ->
6 | 	supervisor:start_link({local, ?MODULE}, ?MODULE, []).
7 | 8 | 9 | init([]) -> 10 | {ok, {{one_for_one, 10, 1}, 11 | [ 12 | {mongodb, 13 | {mongodb, start, []}, 14 | permanent, 15 | 100, 16 | worker, 17 | [mongodb]} 18 | ] 19 | }}. --------------------------------------------------------------------------------