├── rebar ├── .gitignore ├── rebar.config ├── src ├── geocouch.app.src ├── vtree │ ├── README │ ├── run_vtreestats.erl │ ├── run_vtreeviz.erl │ ├── vtree_insbench.erl │ ├── vtreestats.erl │ └── vtreeviz.erl └── geocouch │ ├── couch_spatial.hrl │ ├── geocouch_duplicates.erl │ ├── couch_spatial_compactor.erl │ ├── couch_httpd_spatial_merger.erl │ ├── couch_httpd_spatial_list.erl │ ├── couch_spatial.erl │ ├── couch_httpd_spatial.erl │ ├── couch_spatial_merger.erl │ ├── couch_spatial_updater.erl │ └── couch_spatial_group.erl ├── etc └── couchdb │ └── default.d │ └── geocouch.ini ├── Makefile ├── share └── www │ └── script │ └── test │ ├── spatial_bugfixes.js │ ├── spatial_offsets.js │ ├── etags_spatial.js │ ├── spatial_design_docs.js │ ├── spatial_compaction.js │ ├── multiple_spatial_rows.js │ ├── list_spatial.js │ ├── spatial.js │ └── spatial_merging.js ├── test ├── gc_test_util.erl ├── 200-compact.t └── 100-updater.t └── README.md /rebar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jchris/geocouch/master/rebar -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.beam 2 | *~ 3 | *.orig 4 | *.rej 5 | erl_crash.dump 6 | coverage 7 | *.tar.gz 8 | tmp 9 | ebin 10 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {erl_opts, [ 2 | % Make sure test/gc_test_util.erl gets compiled 3 | {src_dirs, ["test"]} 4 | ]}. 
5 | -------------------------------------------------------------------------------- /src/geocouch.app.src: -------------------------------------------------------------------------------- 1 | {application, geocouch, [ 2 | {description, "GeoCouch - A spatial index for CouchDB"}, 3 | {vsn, "0.2.0"}, 4 | {modules, []}, 5 | {registered, []}, 6 | {applications, [kernel, stdlib]}, 7 | {env, []} 8 | ]}. 9 | -------------------------------------------------------------------------------- /src/vtree/README: -------------------------------------------------------------------------------- 1 | If you want to have an visual output of your tree, you need to know the 2 | filename and the position of the root node. 3 | 4 | erl -pa PATH-TO-COUCHDB-BEAM-FILES -noshell -s run_vtreeviz run -s init stop -file /tmp/randtree.bin -pos 7581 5 | 6 | The output is a Graphviz dot file. Create a PNG out of it with: 7 | dot -Tpng -o randtree.png YOUR-DOT-FILE 8 | 9 | 10 | If you want to run the vtree tests: 11 | go to build dir and: 12 | erl -pa PATH-TO-COUCHDB-BEAM-FILES -pa PATH-TO-ETAP-BEAM-FILES -noshell -s vtree_test start -s init stop 13 | -------------------------------------------------------------------------------- /etc/couchdb/default.d/geocouch.ini: -------------------------------------------------------------------------------- 1 | [daemons] 2 | spatial_manager={couch_spatial, start_link, []} 3 | 4 | [httpd_global_handlers] 5 | _spatial_merge = {couch_httpd_spatial_merger, handle_req} 6 | 7 | [httpd_db_handlers] 8 | _spatial_cleanup = {couch_httpd_spatial, handle_spatial_cleanup_req} 9 | 10 | [httpd_design_handlers] 11 | _spatial = {couch_httpd_spatial, handle_spatial_req} 12 | _spatial/_list = {couch_httpd_spatial_list, handle_spatial_list_req} 13 | _spatial/_info = {couch_httpd_spatial, handle_design_info_req} 14 | _spatial/_compact = {couch_httpd_spatial, handle_compact_req} 15 | ;deprecated API 16 | _spatiallist = {couch_httpd_spatial_list, handle_spatial_list_req_deprecated} 
17 | -------------------------------------------------------------------------------- /src/vtree/run_vtreestats.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(run_vtreestats). 14 | -export([run/0]). 15 | 16 | % Parameters are file (the filename) and pos (the position of the root node) 17 | run() -> 18 | {ok, [[Filename]]} = init:get_argument(file), 19 | {ok, [[PosString]]} = init:get_argument(pos), 20 | {Pos, _} = string:to_integer(PosString), 21 | case couch_file:open(Filename, [read]) of 22 | {ok, Fd} -> 23 | vtreestats:print(Fd, Pos), 24 | ok; 25 | {error, Reason} -> 26 | io:format("ERROR (~s): Couldn't open file (~s) for tree storage~n", 27 | [Reason, Filename]) 28 | end. 29 | -------------------------------------------------------------------------------- /src/vtree/run_vtreeviz.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(run_vtreeviz). 14 | -export([run/0]). 15 | 16 | % Parameters are file (the filename) and pos (the position of the root node) 17 | run() -> 18 | {ok, [[Filename]]} = init:get_argument(file), 19 | {ok, [[PosString]]} = init:get_argument(pos), 20 | {Pos, _} = string:to_integer(PosString), 21 | case couch_file:open(Filename, [read]) of 22 | {ok, Fd} -> 23 | vtreeviz:visualize(Fd, Pos), 24 | ok; 25 | {error, Reason} -> 26 | io:format("ERROR (~s): Couldn't open file (~s) for tree storage~n", 27 | [Reason, Filename]) 28 | end. 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ERL=erl 2 | VERSION=$(shell git describe) 3 | # Output ERL_COMPILER_OPTIONS env variable 4 | COMPILER_OPTIONS=$(shell $(ERL) -noinput +B -eval 'Options = case os:getenv("ERL_COMPILER_OPTIONS") of false -> []; Else -> {ok,Tokens,_} = erl_scan:string(Else ++ "."),{ok,Term} = erl_parse:parse_term(Tokens), Term end, io:format("~p~n", [[{i, "${COUCH_SRC}"}, {i, "${COUCH_SRC}/.."}] ++ Options]), halt(0).') 5 | COMPILER_OPTIONS_MAKE_CHECK=$(shell $(ERL) -noinput +B -eval 'Options = case os:getenv("ERL_COMPILER_OPTIONS") of false -> []; Else -> {ok,Tokens,_} = erl_scan:string(Else ++ "."),{ok,Term} = erl_parse:parse_term(Tokens), Term end, io:format("~p~n", [[{i, "${COUCH_SRC}"},{i, "${COUCH_SRC}/.."},{d, makecheck}] ++ Options]), halt(0).') 6 | 7 | all: compile 8 | 9 | compile: 10 | ERL_COMPILER_OPTIONS='$(COMPILER_OPTIONS)' ./rebar compile 11 | 12 | compileforcheck: 13 | ERL_COMPILER_OPTIONS='$(COMPILER_OPTIONS_MAKE_CHECK)' ./rebar compile 14 | 15 | buildandtest: all test 16 | 17 | runtests: 18 | ERL_FLAGS="-pa ebin -pa ${COUCH_SRC} -pa ${COUCH_SRC}/../etap -pa ${COUCH_SRC}/../snappy -pa ${COUCH_SRC}/../../test/etap -pa 
${COUCH_SRC}/../couch_set_view/ebin -pa ${COUCH_SRC}/../couch_index_merger/ebin -pa ${COUCH_SRC}/../mochiweb -pa ${COUCH_SRC}/../lhttpc -pa ${COUCH_SRC}/../erlang-oauth -pa ${COUCH_SRC}/../ejson -pa ${COUCH_SRC}/../mapreduce" prove ./test/*.t 19 | 20 | check: clean compileforcheck runtests 21 | ./rebar clean 22 | 23 | clean: 24 | ./rebar clean 25 | rm -f *.tar.gz 26 | 27 | geocouch-$(VERSION).tar.gz: 28 | git archive --prefix=geocouch-$(VERSION)/ --format tar HEAD | gzip -9vc > $@ 29 | 30 | dist: geocouch-$(VERSION).tar.gz 31 | -------------------------------------------------------------------------------- /src/vtree/vtree_insbench.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(vtree_insbench). 14 | -export([start/0]). 15 | 16 | -export([test_insertion/0, profile_insertion/0]). 17 | 18 | -define(FILENAME, "/tmp/vtree_huge.bin"). 19 | 20 | -record(node, { 21 | % type = inner | leaf 22 | type=leaf}). 23 | 24 | start() -> 25 | %test_insertion(), 26 | %profile_insertion(), 27 | etap:end_tests(). 
28 | 29 | test_insertion() -> 30 | etap:plan(1), 31 | 32 | case couch_file:open(?FILENAME, [create, overwrite]) of 33 | {ok, Fd} -> 34 | Max = 1000, 35 | Tree = lists:foldl( 36 | fun(Count, CurTreePos) -> 37 | RandomMbr = {random:uniform(Max), random:uniform(Max), 38 | random:uniform(Max), random:uniform(Max)}, 39 | %io:format("~p~n", [RandomMbr]), 40 | {ok, _, NewRootPos} = vtree:insert( 41 | Fd, CurTreePos, 42 | {RandomMbr, #node{type=leaf}, 43 | list_to_binary("Node" ++ integer_to_list(Count))}), 44 | %io:format("test_insertion: ~p~n", [NewRootPos]), 45 | NewRootPos 46 | end, -1, lists:seq(1,5000)), 47 | %end, -1, lists:seq(1,60000)), 48 | io:format("Tree: ~p~n", [Tree]), 49 | ok; 50 | {error, _Reason} -> 51 | io:format("ERROR: Couldn't open file (~s) for tree storage~n", 52 | [?FILENAME]) 53 | end. 54 | 55 | profile_insertion() -> 56 | fprof:apply(insertion, test_insertion, []), 57 | fprof:profile(), 58 | fprof:analyse(). 59 | -------------------------------------------------------------------------------- /src/geocouch/couch_spatial.hrl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -define(LATEST_SPATIAL_DISK_VERSION, 2). 14 | 15 | % The counterpart to #spatial_group in the view server is #group 16 | -record(spatial_group, { 17 | sig=nil, 18 | % XXX vmx 2011-11-30: CouchDB doesn't need 'db' any more. 
19 | % Check if/why GeoCouch still needs it. 20 | db=nil, 21 | fd=nil, 22 | name, % design document ID 23 | def_lang, 24 | design_options=[], 25 | indexes, 26 | lib, 27 | id_btree=nil, % the back-index 28 | current_seq=0, 29 | purge_seq=0 30 | % waiting_delayed_commit=nil 31 | }). 32 | 33 | % It's the tree strucure of the spatial index 34 | % The counterpart to #spatial in the view server is #view 35 | -record(spatial, { 36 | root_dir=nil, 37 | seq=0, 38 | treepos=nil, 39 | treeheight=0, % height of the tree 40 | def=nil, % The function in the query/view server 41 | index_names=[], 42 | id_num=0, % comes from couch_spatial_group requirements 43 | update_seq=0, % comes from couch_spatial_group requirements 44 | purge_seq=0, % comes from couch_spatial_group requirements 45 | % Store the FD from the group within the index as well, so we don't have 46 | % to pass on the group when we only want the FD to write to/read from 47 | fd=nil 48 | }). 49 | 50 | % The counterpart to #spatial_index_header in the view server is #index_header 51 | -record(spatial_index_header, { 52 | seq=0, 53 | purge_seq=0, 54 | id_btree_state=nil, % pointer/position in file to back-index 55 | % One #spatial record for every index that is stripped by the information 56 | % that can be retrieved from a Design Document or during runtime. 57 | % Only the fields that need to persisted will have sane values 58 | index_states=nil, 59 | disk_version = ?LATEST_SPATIAL_DISK_VERSION 60 | }). 61 | 62 | % The counterpart to #spatial_query_args in the view server is 63 | % #view_query_args 64 | -record(spatial_query_args, { 65 | bbox=nil, 66 | stale=nil, 67 | count=false, 68 | % Bounds of the cartesian plane 69 | bounds=nil, 70 | limit = 10000000000, % Huge number to simplify logic 71 | skip = 0 72 | }). 73 | 74 | % The counterpart to #spatial_fold_helper_funs in the view server is 75 | % #view__fold_helper_funs 76 | -record(spatial_fold_helper_funs, { 77 | start_response, 78 | send_row 79 | }). 
80 | -------------------------------------------------------------------------------- /src/vtree/vtreestats.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(vtreestats). 14 | 15 | -export([print/2, leaf_depths/2]). 16 | 17 | -record(stats, { 18 | % number of children in inner nodes 19 | numinner = [], 20 | % number of children in leaf nodes 21 | numleafs = [], 22 | % depth of the leaf nodes 23 | depth = [] 24 | }). 25 | 26 | 27 | print(Fd, ParentPos) -> 28 | Stats = stats(Fd, ParentPos), 29 | Inner = Stats#stats.numinner, 30 | Leafs = Stats#stats.numleafs, 31 | Depth = Stats#stats.depth, 32 | io:format("Result: ~w~n", [Stats]), 33 | io:format("innernodes (~w)~n", [length(Inner)]), 34 | io:format(" sum: ~w~n", [lists:sum(Inner)]), 35 | io:format(" avg (min, max): ~.1f (~w, ~w)~n", 36 | [lists:sum(Inner)/length(Inner), lists:min(Inner), lists:max(Inner)]), 37 | 38 | io:format("leafs (~w)~n", [length(Leafs)]), 39 | io:format(" sum: ~w~n", [lists:sum(Leafs)]), 40 | io:format(" avgnum (min, max): ~.1f (~w, ~w)~n", 41 | [lists:sum(Leafs)/length(Leafs), lists:min(Leafs), lists:max(Leafs)]), 42 | io:format(" avgdepth (min, max): ~.1f (~w, ~w)~n", 43 | [lists:sum(Depth)/length(Depth), lists:min(Depth), lists:max(Depth)]). 
44 | 45 | 46 | % @doc Returns a list of leaf node depth (one value for every depth) 47 | -spec leaf_depths(Fd::file:io_device(), integer) -> [integer()]. 48 | leaf_depths(Fd, RootPos) -> 49 | Stats = stats(Fd, RootPos), 50 | sets:to_list(sets:from_list(Stats#stats.depth)). 51 | 52 | 53 | stats(Fd, RootPos) -> 54 | Stats = stats(Fd, RootPos, 0, #stats{}), 55 | Stats#stats{numinner=lists:reverse(Stats#stats.numinner), 56 | numleafs=lists:reverse(Stats#stats.numleafs), 57 | depth=lists:reverse(Stats#stats.depth)}. 58 | 59 | stats(Fd, RootPos, Depth, Stats) -> 60 | {ok, Parent} = couch_file:pread_term(Fd, RootPos), 61 | {_ParentMbr, _ParentMeta, EntriesPos} = Parent, 62 | 63 | if 64 | % leaf node 65 | is_tuple(hd(EntriesPos)) -> 66 | Stats#stats{numleafs=[length(EntriesPos)|Stats#stats.numleafs], 67 | depth=[Depth|Stats#stats.depth]}; 68 | % inner node 69 | true -> 70 | Stats4 = lists:foldl(fun(EntryPos, Stats2) -> 71 | stats(Fd, EntryPos, Depth+1, Stats2) 72 | end, Stats, EntriesPos), 73 | Stats4#stats{numinner=[length(EntriesPos)|Stats4#stats.numinner]} 74 | end. 75 | -------------------------------------------------------------------------------- /src/vtree/vtreeviz.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(vtreeviz). 14 | 15 | -export([visualize/2]). 16 | 17 | -record(node, { 18 | % type = inner | leaf 19 | type = inner 20 | }). 
21 | 22 | 23 | visualize(Fd, ParentPos) -> 24 | io:format("digraph G~n{~n node [shape = record];~n", []), 25 | print_children(Fd, ParentPos), 26 | io:format("}~n", []), 27 | ok. 28 | 29 | get_children(Fd, Pos) -> 30 | {ok, {_RootMbr, _RootMeta, Children}} = couch_file:pread_term(Fd, Pos), 31 | Children. 32 | 33 | print_children(Fd, ParentPos) -> 34 | ChildrenPos = get_children(Fd, ParentPos), 35 | ChildrenLabels = if is_integer(hd(ChildrenPos)) -> 36 | ChildrenMbrMeta = lists:map(fun(ChildPos) -> 37 | %io:format("ChildPos: ~p~n", [ChildPos]), 38 | {ok, {Mbr, Meta, _Children}} = couch_file:pread_term(Fd, ChildPos), 39 | {Mbr, Meta} 40 | end, ChildrenPos), 41 | node_labels(ChildrenPos, ChildrenMbrMeta); 42 | true -> 43 | node_labels(ChildrenPos) 44 | end, 45 | io:format("node~w [label=\"{~s}\"];~n", [ParentPos, ChildrenLabels]), 46 | print_edges(Fd, ParentPos, ChildrenPos). 47 | 48 | % leaf nodes 49 | node_labels(Children) -> 50 | string_join("|", Children, fun({Mbr, Meta, {Id, _Val}}) -> 51 | io_lib:format("~s ~w ~w", [Id, tuple_to_list(Mbr), Meta#node.type]) 52 | end). 53 | 54 | % inner nodes 55 | node_labels(ChildrenPos, ChildrenMbrMeta) -> 56 | Children = lists:zip(ChildrenPos, ChildrenMbrMeta), 57 | ChildrenLabels = lists:map(fun({ChildPos, {ChildMbr, ChildMeta}}) -> 58 | io_lib:format("~w ~w ~w", [ChildPos, ChildPos, 59 | tuple_to_list(ChildMbr), ChildMeta#node.type]) 60 | end, Children), 61 | string_join("|", ChildrenLabels). 62 | 63 | print_edges(Fd, ParentPos, Children) -> 64 | lists:foreach(fun(ChildPos) -> 65 | if is_integer(ChildPos) -> 66 | io:format("node~w:f~w -> node~w~n", [ParentPos, ChildPos, ChildPos]), 67 | print_children(Fd, ChildPos); 68 | true -> 69 | ok 70 | end 71 | end, Children). 72 | 73 | 74 | % From http://www.trapexit.org/String_join_with (2010-03-12) 75 | string_join(Join, L) -> 76 | string_join(Join, L, fun(E) -> E end). 
77 | 78 | string_join(_Join, L=[], _Conv) -> 79 | L; 80 | string_join(Join, [H|Q], Conv) -> 81 | lists:flatten(lists:concat( 82 | [Conv(H)|lists:map(fun(E) -> [Join, Conv(E)] end, Q)] 83 | )). 84 | -------------------------------------------------------------------------------- /share/www/script/test/spatial_bugfixes.js: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 12 | 13 | // These tests are here to make sure bugs are fixed 14 | couchTests.spatial_bugfixes = function(debug) { 15 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"}); 16 | db.deleteDb(); 17 | db.createDb(); 18 | 19 | if (debug) debugger; 20 | 21 | 22 | var designDoc = { 23 | _id:"_design/spatial", 24 | language: "javascript", 25 | spatial : { 26 | points : (function(doc) { 27 | if (doc.loc) { 28 | emit({ 29 | type: "Point", 30 | coordinates: [doc.loc[0], doc.loc[1]] 31 | }, doc._id); 32 | } 33 | }).toString() 34 | } 35 | }; 36 | 37 | T(db.save(designDoc).ok); 38 | 39 | 40 | function makeSpatialDocs(start, end, templateDoc) { 41 | var docs = makeDocs(start, end, templateDoc); 42 | for (var i=0; i total, but limit < total"); 101 | }; 102 | -------------------------------------------------------------------------------- /share/www/script/test/etags_spatial.js: -------------------------------------------------------------------------------- 1 | // Licensed under 
the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 12 | 13 | couchTests.etags_spatial = function(debug) { 14 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"}); 15 | db.deleteDb(); 16 | db.createDb(); 17 | if (debug) debugger; 18 | 19 | var designDoc = { 20 | _id:"_design/etags", 21 | language: "javascript", 22 | spatial: { 23 | basicIndex: stringFun(function(doc) { 24 | emit({ 25 | type: "Point", 26 | coordinates: [doc.loc[0], doc.loc[1]] 27 | }, doc.string); 28 | }), 29 | fooIndex: stringFun(function(doc) { 30 | if (doc.foo) { 31 | emit({ 32 | type: "Point", 33 | coordinates: [1, 2] 34 | }, 1); 35 | } 36 | }) 37 | } 38 | }; 39 | T(db.save(designDoc).ok); 40 | 41 | function makeSpatialDocs(start, end, templateDoc) { 42 | var docs = makeDocs(start, end, templateDoc); 43 | for (var i=0; i5) { 38 | emit({ 39 | type: "Point", 40 | coordinates: [doc.loc[0], doc.loc[1]] 41 | }, doc.string); 42 | } 43 | }), 44 | emitNothing : stringFun(function(doc) {}), 45 | geoJsonGeoms : stringFun(function(doc) { 46 | if (doc._id.substr(0,3)=="geo") { 47 | emit(doc.geom, doc.string); 48 | } 49 | }) 50 | /* This is a 1.1.x feature, disable for now 51 | withCommonJs : stringFun(function(doc) { 52 | var lib = require('views/lib/geo'); 53 | emit({ 54 | type: lib.type, 55 | coordinates: [doc.loc[0], doc.loc[1]] 56 | }, doc.string); 57 | })*/ 58 | } 59 | }; 60 | 61 | T(db.save(designDoc).ok); 62 | 63 | 64 | function makeSpatialDocs(start, end, templateDoc) 
{ 65 | var docs = makeDocs(start, end, templateDoc); 66 | for (var i=0; i 0); 113 | TEquals(false, sinfo.compact_running); 114 | // test that GET /db/_design/test/_info 115 | // hasn't triggered an update of the views 116 | wait(3); 117 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" 118 | + bbox.join(",") + '&stale=ok'); 119 | T(JSON.parse(xhr.responseText).rows.length === 0); 120 | 121 | // test that POST /db/_spatial_cleanup 122 | // doesn't trigger an update of the views 123 | xhr = CouchDB.request("GET", '/test_suite_db/_spatial_cleanup'); 124 | wait(3); 125 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" 126 | + bbox.join(",") + '&stale=ok'); 127 | T(JSON.parse(xhr.responseText).rows.length === 0); 128 | }; 129 | -------------------------------------------------------------------------------- /src/geocouch/geocouch_duplicates.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(geocouch_duplicates). 14 | 15 | % This module contains functions that are needed by GeoCouch but that 16 | % are not exported by CouchDB. Here's the place to put the code from 17 | % those functions. 18 | 19 | -include("couch_db.hrl"). 20 | -export([start_list_resp/6, send_non_empty_chunk/2, sort_lib/1, 21 | make_arity_3_fun/1, parse_int_param/1, parse_positive_int_param/1]). 
22 | 23 | % From couch_httpd_show 24 | start_list_resp(QServer, LName, Req, Db, Head, Etag) -> 25 | JsonReq = couch_httpd_external:json_req_obj(Req, Db), 26 | [<<"start">>,Chunks,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer, 27 | [<<"lists">>, LName], [Head, JsonReq]), 28 | JsonResp2 = apply_etag(JsonResp, Etag), 29 | #extern_resp_args{ 30 | code = Code, 31 | ctype = CType, 32 | headers = ExtHeaders 33 | } = couch_httpd_external:parse_external_response(JsonResp2), 34 | JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders), 35 | {ok, Resp} = couch_httpd:start_chunked_response(Req, Code, JsonHeaders), 36 | {ok, Resp, ?b2l(?l2b(Chunks))}. 37 | % Needed for start_list_resp/6 38 | apply_etag({ExternalResponse}, CurrentEtag) -> 39 | % Here we embark on the delicate task of replacing or creating the 40 | % headers on the JsonResponse object. We need to control the Etag and 41 | % Vary headers. If the external function controls the Etag, we'd have to 42 | % run it to check for a match, which sort of defeats the purpose. 43 | case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of 44 | nil -> 45 | % no JSON headers 46 | % add our Etag and Vary headers to the response 47 | {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]}; 48 | JsonHeaders -> 49 | {[case Field of 50 | {<<"headers">>, JsonHeaders} -> % add our headers 51 | JsonHeadersEtagged = couch_util:json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders), 52 | JsonHeadersVaried = couch_util:json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged), 53 | {<<"headers">>, JsonHeadersVaried}; 54 | _ -> % skip non-header fields 55 | Field 56 | end || Field <- ExternalResponse]} 57 | end. 58 | 59 | 60 | % From couch_httpd_show 61 | send_non_empty_chunk(Resp, Chunk) -> 62 | case Chunk of 63 | [] -> ok; 64 | _ -> couch_httpd:send_chunk(Resp, Chunk) 65 | end. 
66 | 67 | % From couch_view_group 68 | sort_lib({Lib}) -> 69 | sort_lib(Lib, []). 70 | sort_lib([], LAcc) -> 71 | lists:keysort(1, LAcc); 72 | sort_lib([{LName, {LObj}}|Rest], LAcc) -> 73 | LSorted = sort_lib(LObj, []), % descend into nested object 74 | sort_lib(Rest, [{LName, LSorted}|LAcc]); 75 | sort_lib([{LName, LCode}|Rest], LAcc) -> 76 | sort_lib(Rest, [{LName, LCode}|LAcc]). 77 | 78 | % From couch_httpd (will be exported from 1.1.x on) 79 | make_arity_3_fun(SpecStr) -> 80 | case couch_util:parse_term(SpecStr) of 81 | {ok, {Mod, Fun, SpecArg}} -> 82 | fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end; 83 | {ok, {Mod, Fun}} -> 84 | fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end 85 | end. 86 | 87 | % From couch_httpd_view 88 | parse_int_param(Val) -> 89 | case (catch list_to_integer(Val)) of 90 | IntVal when is_integer(IntVal) -> 91 | IntVal; 92 | _ -> 93 | Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]), 94 | throw({query_parse_error, ?l2b(Msg)}) 95 | end. 96 | 97 | % From couch_httpd_view 98 | parse_positive_int_param(Val) -> 99 | case parse_int_param(Val) of 100 | IntVal when IntVal >= 0 -> 101 | IntVal; 102 | _ -> 103 | Fmt = "Invalid value for positive integer parameter: ~p", 104 | Msg = io_lib:format(Fmt, [Val]), 105 | throw({query_parse_error, ?l2b(Msg)}) 106 | end. 107 | -------------------------------------------------------------------------------- /share/www/script/test/spatial_compaction.js: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. 
You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 12 | 13 | couchTests.spatial_compaction = function(debug) { 14 | 15 | if (debug) debugger; 16 | 17 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit": "true"}); 18 | 19 | db.deleteDb(); 20 | db.createDb(); 21 | 22 | var ddoc = { 23 | _id: "_design/compaction", 24 | language: "javascript", 25 | spatial: { 26 | basicIndex: (function(doc) { 27 | emit({ 28 | type: "Point", 29 | coordinates: doc.loc 30 | }, doc.string); 31 | }).toString(), 32 | fooIndex: (function(doc) { 33 | if (doc._id<500) { 34 | emit({ 35 | type: "Point", 36 | coordinates: [1, 2] 37 | }, 1); 38 | } 39 | }).toString() 40 | } 41 | }; 42 | T(db.save(ddoc).ok); 43 | 44 | function makeSpatialDocs(start, end, templateDoc) { 45 | var docs = makeDocs(start, end, templateDoc); 46 | for (var i=0; i string(). 25 | gc_config_file() -> 26 | "etc/couchdb/default.d/geocouch.ini". 27 | 28 | init_code_path() -> 29 | BuildDir = filename:join(root_dir() ++ ["build"]), 30 | code:add_pathz(BuildDir). 31 | 32 | % @doc Returns the root directory of GeoCouch as a list. It makes the 33 | % assumptions that the currently running test is in /test/thetest.t 34 | -spec root_dir() -> [file:filename()]. 35 | root_dir() -> 36 | EscriptName = filename:split(filename:absname(escript:script_name())), 37 | lists:sublist(EscriptName, length(EscriptName)-2). 38 | 39 | % @doc Create a random node. Return the ID of the node and the node itself. 40 | -spec random_node() -> {string(), tuple()}. 41 | random_node() -> 42 | random_node({654, 642, 698}). 
43 | -spec random_node(Seed::{integer(), integer(), integer()}) -> {string(), tuple()}. 44 | random_node(Seed) -> 45 | random:seed(Seed), 46 | Max = 1000, 47 | {W, X, Y, Z} = {random:uniform(Max), random:uniform(Max), 48 | random:uniform(Max), random:uniform(Max)}, 49 | RandomMbr = {erlang:min(W, X), erlang:min(Y, Z), 50 | erlang:max(W, X), erlang:max(Y, Z)}, 51 | RandomLineString = {linestring, [[erlang:min(W, X), erlang:min(Y, Z)], 52 | [erlang:max(W, X), erlang:max(Y, Z)]]}, 53 | {list_to_binary("Node" ++ integer_to_list(Y) ++ integer_to_list(Z)), 54 | {RandomMbr, #node{type=leaf}, RandomLineString, 55 | list_to_binary("Value" ++ integer_to_list(Y) ++ integer_to_list(Z))}}. 56 | 57 | -spec build_random_tree(Filename::string(), Num::integer()) -> 58 | {ok, {file:io_device(), {integer(), integer()}}} | {error, string()}. 59 | build_random_tree(Filename, Num) -> 60 | % The random seed generator changed in R15 (erts 5.9) 61 | case erlang:system_info(version) >= "5.9" of 62 | true -> build_random_tree(Filename, Num, {86880, 81598, 91188}); 63 | false -> build_random_tree(Filename, Num, {654, 642, 698}) 64 | end. 65 | -spec build_random_tree(Filename::string(), Num::integer(), 66 | Seed::{integer(), integer(), integer()}) -> 67 | {ok, {file:io_device(), {integer(), integer()}}} | {error, string()}. 
68 | build_random_tree(Filename, Num, Seed) -> 69 | random:seed(Seed), 70 | case couch_file:open(Filename, [create, overwrite]) of 71 | {ok, Fd} -> 72 | Max = 1000, 73 | {Tree, TreeHeight} = lists:foldl( 74 | fun(Count, {CurTreePos, _CurTreeHeight}) -> 75 | {W, X, Y, Z} = {random:uniform(Max), random:uniform(Max), 76 | random:uniform(Max), random:uniform(Max)}, 77 | RandomMbr = {erlang:min(W, X), erlang:min(Y, Z), 78 | erlang:max(W, X), erlang:max(Y, Z)}, 79 | RandomLineString = {linestring, 80 | [[erlang:min(W, X), erlang:min(Y, Z)], 81 | [erlang:max(W, X), erlang:max(Y, Z)]]}, 82 | %io:format("~p~n", [RandomMbr]), 83 | {ok, _, NewRootPos, NewTreeHeight} = vtree:insert( 84 | Fd, CurTreePos, 85 | list_to_binary("Node" ++ integer_to_list(Count)), 86 | {RandomMbr, #node{type=leaf}, RandomLineString, 87 | list_to_binary("Node" ++ integer_to_list(Count))}), 88 | %io:format("test_insertion: ~p~n", [NewRootPos]), 89 | {NewRootPos, NewTreeHeight} 90 | end, {nil, 0}, lists:seq(1,Num)), 91 | %io:format("Tree: ~p~n", [Tree]), 92 | {ok, {Fd, {Tree, TreeHeight}}}; 93 | {error, _Reason} -> 94 | io:format("ERROR: Couldn't open file (~s) for tree storage~n", 95 | [Filename]) 96 | end. 97 | 98 | 99 | lookup(Fd, Pos, Bbox) -> 100 | % default function returns a list of 2-tuple with 101 | % - 2-tuple with MBR and document ID 102 | % - 2-tuple with the geometry and the actual value 103 | vtree:lookup(Fd, Pos, Bbox, {fun({{Bbox2, DocId}, {Geom, Value}}, Acc) -> 104 | % NOTE vmx (2011-02-09) This should perhaps also be changed from 105 | % {Bbox2, DocId, Geom, Value} to {{Bbox2, DocId}, {Geom, Value}} 106 | Acc2 = [{Bbox2, DocId, Geom, Value}|Acc], 107 | {ok, Acc2} 108 | end, []}, nil). 
%% ---------------------------------------------------------------------------
%% src/geocouch/couch_spatial_compactor.erl
%% ---------------------------------------------------------------------------

% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_spatial_compactor).

-include("couch_db.hrl").
-include("couch_spatial.hrl").

-export([start_compact/2]).

%% @spec start_compact(
%%           DbName::binary()|{DbName::binary(), GroupDbName::binary()},
%%           GroupId::binary()) -> ok
%% @doc Kick off compaction of the spatial indexes of a design document.
%%      GroupId must not include the "_design/" prefix.
%%
%% First clause: the design document lives in a different ("foreign")
%% database than the documents it indexes, hence the {DbName, GroupDbName}
%% tuple. A closure is used so compact_group/3 learns both database names.
start_compact({_DbName, _GroupDbName} = Target, GroupId) ->
    GroupPid = couch_spatial:get_group_server(Target, GroupId),
    CompactFun = fun(Group, EmptyGroup, _) ->
        compact_group(Group, EmptyGroup, Target)
    end,
    gen_server:cast(GroupPid, {start_compact, CompactFun});
%% Second clause: design document and data share one database.
start_compact(DbName, GroupId) ->
    GroupPid = couch_spatial:get_group_server(
        DbName, <<"_design/", GroupId/binary>>),
    gen_server:cast(GroupPid, {start_compact, fun compact_group/3}).
36 | 37 | %%============================================================================= 38 | %% internal functions 39 | %%============================================================================= 40 | 41 | % For foreign Design Documents (stored in a different DB) 42 | docs_db_name({DocsDbName, _DDocDbName}) -> 43 | DocsDbName; 44 | docs_db_name(DbName) when is_binary(DbName) -> 45 | DbName. 46 | 47 | %% @spec compact_group(Group, NewGroup) -> ok 48 | compact_group(Group, EmptyGroup, DbName) -> 49 | #spatial_group{ 50 | current_seq = Seq, 51 | id_btree = IdBtree, 52 | name = GroupId, 53 | indexes = Indexes, 54 | fd = Fd 55 | } = Group, 56 | 57 | #spatial_group{ 58 | id_btree = EmptyIdBtree, 59 | indexes = EmptyIndexes, 60 | fd = EmptyFd 61 | } = EmptyGroup, 62 | 63 | DbName1 = docs_db_name(DbName), 64 | {ok, Db} = couch_db:open_int(DbName1, []), 65 | {ok, <>} = 66 | couch_btree:full_reduce(Db#db.docinfo_by_id_btree), 67 | 68 | % Use "view_compaction" for now, that it shows up in Futons active tasks 69 | % screen. Think about a more generic way for the future. 
70 | couch_task_status:add_task([ 71 | {type, view_compaction}, 72 | {database, DbName1}, 73 | {design_document, GroupId}, 74 | {progress, 0} 75 | ]), 76 | % Create a new version of the lookup tree (the ID B-tree) 77 | Fun = fun({DocId, _IndexIdKeys} = KV, {Bt, Acc, TotalCopied, _LastId}) -> 78 | % NOTE vmx (2011-01-18): use the same value as for view compaction, 79 | % though wondering why a value of 10000 is hard-coded 80 | if TotalCopied rem 10000 =:= 0 -> 81 | couch_task_status:update([ 82 | {progress, (TotalCopied*100) div Count}]), 83 | {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])), 84 | {ok, {Bt2, [], TotalCopied+1, DocId}}; 85 | true -> 86 | {ok, {Bt, [KV|Acc], TotalCopied+1, DocId}} 87 | end 88 | end, 89 | {ok, _, {Bt3, Uncopied, _Total, _LastId}} = couch_btree:foldl(IdBtree, Fun, 90 | {EmptyIdBtree, [], 0, nil}), 91 | couch_file:flush(Bt3#btree.fd), 92 | {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)), 93 | couch_file:flush(NewIdBtree#btree.fd), 94 | 95 | NewIndexes = lists:map(fun({Index, EmptyIndex}) -> 96 | case Index#spatial.treepos of 97 | % Tree is empty, just grab the the FD 98 | nil -> EmptyIndex#spatial{fd = EmptyFd}; 99 | _ -> compact_spatial(Fd, EmptyFd, Index, EmptyIndex) 100 | end 101 | end, lists:zip(Indexes, EmptyIndexes)), 102 | 103 | NewGroup = EmptyGroup#spatial_group{ 104 | id_btree=NewIdBtree, 105 | indexes=NewIndexes, 106 | current_seq=Seq 107 | }, 108 | 109 | Pid = couch_spatial:get_group_server(DbName, GroupId), 110 | gen_server:cast(Pid, {compact_done, NewGroup}). 
111 | 112 | %% @spec compact_spatial(Index, EmptyIndex) -> CompactView 113 | compact_spatial(OldFd, NewFd, Index, EmptyIndex) -> 114 | {ok, Count} = couch_spatial:get_item_count(OldFd, Index#spatial.treepos), 115 | 116 | Fun = fun(Node, {TreePos, TreeHeight, Acc, TotalCopied}) -> 117 | if TotalCopied rem 10000 =:= 0 -> 118 | couch_task_status:update([ 119 | {progress, (TotalCopied*100) div Count}]), 120 | {ok, TreePos2, TreeHeight2} = vtree_bulk:bulk_load( 121 | NewFd, TreePos, TreeHeight, [Node|Acc]), 122 | {TreePos2, TreeHeight2, [], TotalCopied + 1}; 123 | true -> 124 | {TreePos, TreeHeight, [Node|Acc], TotalCopied + 1} 125 | end 126 | end, 127 | 128 | {TreePos3, TreeHeight3, Uncopied, _Total} = vtree:foldl( 129 | OldFd, Index#spatial.treepos, Fun, 130 | {EmptyIndex#spatial.treepos, EmptyIndex#spatial.treeheight, [], 0}), 131 | {ok, NewTreePos, NewTreeHeight} = vtree_bulk:bulk_load( 132 | NewFd, TreePos3, TreeHeight3, Uncopied), 133 | EmptyIndex#spatial{ 134 | treepos = NewTreePos, 135 | treeheight = NewTreeHeight, 136 | fd = NewFd 137 | }. 138 | -------------------------------------------------------------------------------- /share/www/script/test/multiple_spatial_rows.js: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 
12 | 13 | couchTests.multiple_spatial_rows = function(debug) { 14 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"}); 15 | db.deleteDb(); 16 | db.createDb(); 17 | if (debug) debugger; 18 | 19 | var designDoc = { 20 | _id: "_design/multirows", 21 | language: "javascript", 22 | spatial: { 23 | sameKey: (function(doc) { 24 | for (var i=0; i 32 | Indexes = validate_spatial_param(qs_json_value(Req, "spatial", nil)), 33 | DDocRevision = couch_index_merger:validate_revision_param( 34 | qs_json_value(Req, <<"ddoc_revision">>, nil)), 35 | MergeParams0 = #index_merge{ 36 | indexes = Indexes, 37 | ddoc_revision = DDocRevision 38 | }, 39 | MergeParams1 = couch_httpd_view_merger:apply_http_config( 40 | Req, [], MergeParams0), 41 | couch_index_merger:query_index(couch_spatial_merger, MergeParams1, Req); 42 | 43 | handle_req(#httpd{method = 'POST'} = Req) -> 44 | couch_httpd:validate_ctype(Req, "application/json"), 45 | {Props} = couch_httpd:json_body_obj(Req), 46 | Indexes = validate_spatial_param(get_value(<<"spatial">>, Props)), 47 | DDocRevision = couch_index_merger:validate_revision_param( 48 | get_value(<<"ddoc_revision">>, Props, nil)), 49 | MergeParams0 = #index_merge{ 50 | indexes = Indexes, 51 | ddoc_revision = DDocRevision 52 | }, 53 | MergeParams1 = couch_httpd_view_merger:apply_http_config( 54 | Req, Props, MergeParams0), 55 | couch_index_merger:query_index(couch_spatial_merger, MergeParams1, Req); 56 | 57 | handle_req(Req) -> 58 | couch_httpd:send_method_not_allowed(Req, "GET,POST"). 
59 | 60 | %% Valid `spatial` example: 61 | %% 62 | %% { 63 | %% "spatial": { 64 | %% "localdb1": ["ddocname/spatialname", ...], 65 | %% "http://server2/dbname": ["ddoc/spatial"], 66 | %% "http://server2/_spatial_merge": { 67 | %% "spatial": { 68 | %% "localdb3": "spatialname", // local to server2 69 | %% "localdb4": "spatialname" // local to server2 70 | %% } 71 | %% } 72 | %% } 73 | %% } 74 | 75 | validate_spatial_param({[_ | _] = Indexes}) -> 76 | lists:flatten(lists:map( 77 | fun({DbName, SpatialName}) when is_binary(SpatialName) -> 78 | {DDocDbName, DDocId, Vn} = parse_spatial_name(SpatialName), 79 | #simple_index_spec{ 80 | database = DbName, ddoc_id = DDocId, index_name = Vn, 81 | ddoc_database = DDocDbName 82 | }; 83 | ({DbName, SpatialNames}) when is_list(SpatialNames) -> 84 | lists:map( 85 | fun(SpatialName) -> 86 | {DDocDbName, DDocId, Vn} = parse_spatial_name(SpatialName), 87 | #simple_index_spec{ 88 | database = DbName, ddoc_id = DDocId, index_name = Vn, 89 | ddoc_database = DDocDbName 90 | } 91 | end, SpatialNames); 92 | ({MergeUrl, {[_ | _] = Props} = EJson}) -> 93 | case (catch lhttpc_lib:parse_url(?b2l(MergeUrl))) of 94 | #lhttpc_url{} -> 95 | ok; 96 | _ -> 97 | throw({bad_request, "Invalid spatial merge definition object."}) 98 | end, 99 | case get_value(<<"ddoc_revision">>, Props) of 100 | undefined -> 101 | ok; 102 | _ -> 103 | Msg = "Nested 'ddoc_revision' specifications are not allowed.", 104 | throw({bad_request, Msg}) 105 | end, 106 | case get_value(<<"spatial">>, Props) of 107 | {[_ | _]} = SubSpatial -> 108 | SubSpatialSpecs = validate_spatial_param(SubSpatial), 109 | case lists:any( 110 | fun(#simple_index_spec{}) -> true; (_) -> false end, 111 | SubSpatialSpecs) of 112 | true -> 113 | ok; 114 | false -> 115 | SubMergeError = io_lib:format("Could not find a" 116 | " non-composed spatial spec in the spatial merge" 117 | " targeted at `~s`", 118 | [couch_index_merger:rem_passwd(MergeUrl)]), 119 | throw({bad_request, SubMergeError}) 120 | 
end, 121 | #merged_index_spec{url = MergeUrl, ejson_spec = EJson}; 122 | _ -> 123 | SubMergeError = io_lib:format("Invalid spatial merge" 124 | " definition for sub-merge done at `~s`.", 125 | [couch_index_merger:rem_passwd(MergeUrl)]), 126 | throw({bad_request, SubMergeError}) 127 | end; 128 | (_) -> 129 | throw({bad_request, "Invalid spatial merge definition object."}) 130 | end, Indexes)); 131 | 132 | validate_spatial_param(_) -> 133 | throw({bad_request, <<"`spatial` parameter must be an object with at ", 134 | "least 1 property.">>}). 135 | 136 | parse_spatial_name(Name) -> 137 | case string:tokens(couch_util:trim(?b2l(Name)), "/") of 138 | [DDocName, ViewName0] -> 139 | {nil, <<"_design/", (?l2b(DDocName))/binary>>, ?l2b(ViewName0)}; 140 | ["_design", DDocName, ViewName0] -> 141 | {nil, <<"_design/", (?l2b(DDocName))/binary>>, ?l2b(ViewName0)}; 142 | [DDocDbName1, DDocName, ViewName0] -> 143 | DDocDbName = ?l2b(couch_httpd:unquote(DDocDbName1)), 144 | {DDocDbName, <<"_design/", (?l2b(DDocName))/binary>>, ?l2b(ViewName0)}; 145 | [DDocDbName1, "_design", DDocName, ViewName0] -> 146 | DDocDbName = ?l2b(couch_httpd:unquote(DDocDbName1)), 147 | {DDocDbName, <<"_design/", (?l2b(DDocName))/binary>>, ?l2b(ViewName0)}; 148 | _ -> 149 | throw({bad_request, "A `spatial` property must have the shape" 150 | " `ddoc_name/spatial_name`."}) 151 | end. 152 | -------------------------------------------------------------------------------- /src/geocouch/couch_httpd_spatial_list.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. 
You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(couch_httpd_spatial_list). 14 | -include("couch_db.hrl"). 15 | -include("couch_spatial.hrl"). 16 | 17 | -export([handle_spatial_list_req/3]). 18 | % deprecated API 19 | -export([handle_spatial_list_req_deprecated/3]). 20 | 21 | -import(couch_httpd, [send_json/2, send_method_not_allowed/2, 22 | send_error/4, send_chunked_error/2]). 23 | 24 | 25 | % spatial-list request with spatial index and list from same design doc. 26 | handle_spatial_list_req(#httpd{method='GET', 27 | path_parts=[_, _, DesignName, _, _, ListName, SpatialName]}=Req, Db, DDoc) -> 28 | handle_spatial_list(Req, Db, DDoc, ListName, {DesignName, SpatialName}); 29 | 30 | % spatial-list request with spatial index and list from different design docs. 31 | handle_spatial_list_req(#httpd{method='GET', 32 | path_parts=[_, _, _, _, _, ListName, DesignName, SpatialName]}=Req, 33 | Db, DDoc) -> 34 | handle_spatial_list(Req, Db, DDoc, ListName, {DesignName, SpatialName}); 35 | 36 | handle_spatial_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) -> 37 | send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>); 38 | 39 | % POST isn't supported as spatial indexes don't support mutli-key fetch 40 | handle_spatial_list_req(Req, _Db, _DDoc) -> 41 | send_method_not_allowed(Req, "GET,HEAD"). 
42 | 43 | handle_spatial_list(Req, Db, DDoc, LName, {SpatialDesignName, SpatialName}) -> 44 | SpatialDesignId = <<"_design/", SpatialDesignName/binary>>, 45 | {ok, Index, Group, QueryArgs} = couch_httpd_spatial:load_index( 46 | Req, Db, {SpatialDesignId, SpatialName}), 47 | Etag = list_etag(Req, Db, Group, Index, couch_httpd:doc_etag(DDoc)), 48 | couch_httpd:etag_respond(Req, Etag, fun() -> 49 | output_list(Req, Db, DDoc, LName, Index, QueryArgs, Etag, Group) 50 | end). 51 | 52 | % Deprecated API call 53 | handle_spatial_list_req_deprecated(#httpd{method='GET', 54 | path_parts=[A, B, DesignName, C, ListName, SpatialName]}=Req, Db, DDoc) -> 55 | Req2 = Req#httpd{path_parts= 56 | [A, B, DesignName, C, <<"foo">>, ListName, SpatialName]}, 57 | ?LOG_INFO("WARNING: Request to deprecated _spatiallist handler, " ++ 58 | "please use _spatial/_list instead!", []), 59 | handle_spatial_list_req(Req2, Db, DDoc); 60 | handle_spatial_list_req_deprecated(#httpd{method='GET', 61 | path_parts=[A, B, C, D, ListName, DesignName, SpatialName]}=Req, 62 | Db, DDoc) -> 63 | Req2 = Req#httpd{path_parts= 64 | [A, B, C, D, <<"foo">>, ListName, DesignName, SpatialName]}, 65 | ?LOG_INFO("WARNING: Request to deprecated _spatiallist handler, " ++ 66 | "please use _spatial/_list instead!", []), 67 | handle_spatial_list_req(Req2, Db, DDoc); 68 | handle_spatial_list_req_deprecated(#httpd{method='GET'}=Req, _Db, _DDoc) -> 69 | send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>); 70 | handle_spatial_list_req_deprecated(Req, _Db, _DDoc) -> 71 | send_method_not_allowed(Req, "GET,HEAD"). 72 | 73 | 74 | list_etag(#httpd{user_ctx=UserCtx}=Req, Db, Group, Index, More) -> 75 | Accept = couch_httpd:header_value(Req, "Accept"), 76 | couch_httpd_spatial:spatial_etag( 77 | Db, Group, Index, {More, Accept, UserCtx#user_ctx.roles}). 
78 | 79 | output_list(_, _, _, _, _, #spatial_query_args{bbox=nil}, _, _) -> 80 | throw({spatial_query_error, <<"Bounding box not specified.">>}); 81 | 82 | output_list(Req, Db, DDoc, LName, Index, QueryArgs, Etag, Group) -> 83 | #spatial_query_args{ 84 | bbox = Bbox, 85 | bounds = Bounds, 86 | limit = Limit, 87 | skip = SkipCount 88 | } = QueryArgs, 89 | 90 | couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) -> 91 | CurrentSeq = Group#spatial_group.current_seq, 92 | HelperFuns = #spatial_fold_helper_funs{ 93 | start_response = StartListRespFun = make_spatial_start_resp_fun( 94 | QServer, Db, LName), 95 | send_row = make_spatial_get_row_fun(QServer) 96 | }, 97 | FoldFun = couch_httpd_spatial:make_spatial_fold_funs(Req, QueryArgs, 98 | Etag, Db, CurrentSeq, HelperFuns), 99 | FoldAccInit = {Limit, SkipCount, undefined, ""}, 100 | {ok, FoldResult} = couch_spatial:fold(Index, FoldFun, FoldAccInit, 101 | Bbox, Bounds), 102 | finish_list(Req, QServer, Etag, FoldResult, StartListRespFun, 103 | CurrentSeq) 104 | end). 105 | 106 | 107 | % Counterpart to make_map_start_resp_fun/3 in couch_http_show. 108 | make_spatial_start_resp_fun(QueryServer, Db, LName) -> 109 | fun(Req, Etag, UpdateSeq) -> 110 | Head = {[{<<"update_seq">>, UpdateSeq}]}, 111 | geocouch_duplicates:start_list_resp(QueryServer, LName, Req, Db, 112 | Head, Etag) 113 | end. 114 | 115 | % Counterpart to make_map_send_row_fun/1 in couch_http_show. 116 | make_spatial_get_row_fun(QueryServer) -> 117 | fun(Resp, {{_Bbox, _DocId}, {_Geom, _Value}}=Row, RowFront) -> 118 | send_list_row(Resp, QueryServer, Row, RowFront) 119 | end. 
120 | 121 | send_list_row(Resp, QueryServer, Row, RowFront) -> 122 | try 123 | [Go, Chunks] = prompt_list_row(QueryServer, Row), 124 | Chunk = RowFront ++ ?b2l(?l2b(Chunks)), 125 | geocouch_duplicates:send_non_empty_chunk(Resp, Chunk), 126 | case Go of 127 | <<"chunks">> -> 128 | {ok, ""}; 129 | <<"end">> -> 130 | {stop, stop} 131 | end 132 | catch 133 | throw:Error -> 134 | send_chunked_error(Resp, Error), 135 | throw({already_sent, Resp, Error}) 136 | end. 137 | 138 | prompt_list_row({Proc, _DDocId}, {{Bbox, DocId}, {Geom, Value}}) -> 139 | JsonRow = {[{id, DocId}, {value, Value}, {bbox, tuple_to_list(Bbox)}, 140 | {geometry, couch_spatial_updater:geocouch_to_geojsongeom(Geom)}]}, 141 | couch_query_servers:proc_prompt(Proc, [<<"list_row">>, JsonRow]). 142 | 143 | % Counterpart to finish_list/7 in couch_http_show. 144 | finish_list(Req, {Proc, _DDocId}, Etag, FoldResult, StartFun, CurrentSeq) -> 145 | case FoldResult of 146 | {_, _, undefined, _} -> 147 | {ok, Resp, BeginBody} = StartFun(Req, Etag, CurrentSeq), 148 | [<<"end">>, Chunks] = couch_query_servers:proc_prompt( 149 | Proc, [<<"list_end">>]), 150 | Chunk = BeginBody ++ ?b2l(?l2b(Chunks)), 151 | geocouch_duplicates:send_non_empty_chunk(Resp, Chunk); 152 | {_, _, Resp, stop} -> 153 | ok; 154 | {_, _, Resp, _} -> 155 | [<<"end">>, Chunks] = couch_query_servers:proc_prompt( 156 | Proc, [<<"list_end">>]), 157 | geocouch_duplicates:send_non_empty_chunk(Resp, ?b2l(?l2b(Chunks))) 158 | end, 159 | couch_httpd:last_chunk(Resp). 160 | -------------------------------------------------------------------------------- /test/200-compact.t: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %% -*- erlang -*- 3 | 4 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 | % use this file except in compliance with the License. 
You may obtain a copy of 6 | % the License at 7 | % 8 | % http://www.apache.org/licenses/LICENSE-2.0 9 | % 10 | % Unless required by applicable law or agreed to in writing, software 11 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | % License for the specific language governing permissions and limitations under 14 | % the License. 15 | 16 | -record(user_ctx, { 17 | name = null, 18 | roles = [], 19 | handler 20 | }). 21 | 22 | test_db1_name() -> <<"geocouch_test_compaction">>. 23 | test_db2_name() -> <<"geocouch_test_compaction_foreign">>. 24 | ddoc_name() -> <<"foo">>. 25 | admin_user_ctx() -> {user_ctx, #user_ctx{roles = [<<"_admin">>]}}. 26 | 27 | main(_) -> 28 | code:add_pathz(filename:dirname(escript:script_name())), 29 | gc_test_util:init_code_path(), 30 | etap:plan(6), 31 | case (catch test()) of 32 | ok -> 33 | etap:end_tests(); 34 | Other -> 35 | etap:diag(io_lib:format("Test died abnormally: ~p", [Other])), 36 | etap:bail(Other) 37 | end, 38 | ok. 39 | 40 | test() -> 41 | ok = ssl:start(), 42 | ok = lhttpc:start(), 43 | GeoCouchConfig = filename:join( 44 | gc_test_util:root_dir() ++ [gc_test_util:gc_config_file()]), 45 | ConfigFiles = test_util:config_files() ++ [GeoCouchConfig], 46 | couch_server_sup:start_link(ConfigFiles), 47 | timer:sleep(1000), 48 | put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")), 49 | put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))), 50 | 51 | delete_dbs(), 52 | create_dbs(), 53 | add_design_doc(test_db1_name()), 54 | 55 | test_compaction(), 56 | test_compaction_foreign(), 57 | 58 | delete_dbs(), 59 | couch_server_sup:stop(), 60 | ok. 
61 | 62 | 63 | test_compaction() -> 64 | DbName = test_db1_name(), 65 | DdocName = <<"_design/", (ddoc_name())/binary>>, 66 | 67 | insert(DbName), 68 | query_spatial(DbName), 69 | insert(DbName), 70 | query_spatial(DbName), 71 | insert(DbName), 72 | query_spatial(DbName), 73 | 74 | SpatialGroup = couch_spatial:get_group_server(DbName, DdocName), 75 | etap:is(is_pid(SpatialGroup), true, "got spatial group pid"), 76 | etap:is(is_process_alive(SpatialGroup), true, 77 | "spatial group pid is alive"), 78 | 79 | PreCompact = get_spatial_size(DbName), 80 | compact_spatial_group(DbName, ddoc_name()), 81 | PostCompact = get_spatial_size(DbName), 82 | 83 | etap:is(PostCompact < PreCompact, true, "spatial view got compacted"), 84 | ok. 85 | 86 | % This test tests compaction with a Design Document which is not in the same 87 | % database as the data 88 | test_compaction_foreign() -> 89 | DbName = test_db2_name(), 90 | DdocName = <<"_design/", (ddoc_name())/binary>>, 91 | % Name of the database that holds the Design Document 92 | DdocDbName = test_db1_name(), 93 | 94 | insert(DbName), 95 | query_spatial(DbName, DdocDbName), 96 | insert(DbName), 97 | query_spatial(DbName, DdocDbName), 98 | insert(DbName), 99 | query_spatial(DbName, DdocDbName), 100 | 101 | 102 | SpatialGroup = couch_spatial:get_group_server( 103 | {DbName, DdocDbName}, DdocName), 104 | etap:is(is_pid(SpatialGroup), true, "got spatial group pid (b)"), 105 | etap:is(is_process_alive(SpatialGroup), true, 106 | "spatial group pid is alive (b)"), 107 | 108 | PreCompact = get_spatial_size({DbName, DdocDbName}), 109 | compact_spatial_group({DbName, DdocDbName}, ddoc_name()), 110 | PostCompact = get_spatial_size({DbName, DdocDbName}), 111 | 112 | etap:is(PostCompact < PreCompact, true, "spatial view got compacted (b)"), 113 | ok. 
114 | 115 | 116 | create_dbs() -> 117 | {ok, Db1} = couch_db:create(test_db1_name(), [admin_user_ctx()]), 118 | {ok, Db2} = couch_db:create(test_db2_name(), [admin_user_ctx()]), 119 | couch_db:close(Db1), 120 | couch_db:close(Db2), 121 | ok. 122 | 123 | delete_dbs() -> 124 | couch_server:delete(test_db1_name(), [admin_user_ctx()]), 125 | couch_server:delete(test_db2_name(), [admin_user_ctx()]). 126 | 127 | 128 | compact_spatial_group(DbName, DdocName) -> 129 | ok = couch_spatial_compactor:start_compact(DbName, DdocName), 130 | wait_compaction_finished(DbName). 131 | 132 | 133 | add_design_doc(DbName) -> 134 | {ok, Db} = couch_db:open_int(DbName, [admin_user_ctx()]), 135 | DDoc = couch_doc:from_json_obj({[ 136 | {<<"_id">>, <<"_design/foo">>}, 137 | {<<"language">>, <<"javascript">>}, 138 | {<<"spatial">>, {[ 139 | {<<"foo">>, <<"function(doc) { emit({type: \"Point\", coordinates: [0,0]}, doc); }">>} 140 | ]}} 141 | ]}), 142 | ok = couch_db:update_docs(Db, [DDoc]), 143 | {ok, _} = couch_db:ensure_full_commit(Db), 144 | couch_db:close(Db), 145 | ok. 146 | 147 | 148 | % Inserts documents and queries the spatial view 149 | insert(DbName) -> 150 | {ok, Db} = couch_db:open_int(DbName, [admin_user_ctx()]), 151 | _Docs = lists:map( 152 | fun(_) -> 153 | Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}), 154 | ok = couch_db:update_docs(Db, [Doc]) 155 | end, 156 | lists:seq(1, 100)), 157 | couch_db:close(Db), 158 | ok. 159 | 160 | query_spatial(DbName) -> 161 | {ok, Db} = couch_db:open_int(DbName, [admin_user_ctx()]), 162 | DdocName = <<"_design/", (ddoc_name())/binary>>, 163 | % Don't use the HTTP API, as it doesn't support foreig Design 164 | % Documents (those are Design Documents that are not in the same 165 | % database as the data is). 166 | {ok, _Index, _Group} = couch_spatial:get_spatial_index( 167 | Db, DdocName, ddoc_name(), nil), 168 | couch_db:close(Db). 
169 | 170 | query_spatial(DbName, DdocDbName) -> 171 | {ok, Db} = couch_db:open_int(DbName, [admin_user_ctx()]), 172 | {ok, DdocDb} = couch_db:open_int(DdocDbName, [admin_user_ctx()]), 173 | DdocName = <<"_design/", (ddoc_name())/binary>>, 174 | % Don't use the HTTP API, as it doesn't support foreig Design 175 | % Documents (those are Design Documents that are not in the same 176 | % database as the data is). 177 | {ok, _Index, _Group} = couch_spatial:get_spatial_index( 178 | Db, {DdocDb, DdocName}, ddoc_name(), nil), 179 | couch_db:close(Db), 180 | couch_db:close(DdocDb). 181 | 182 | 183 | wait_compaction_finished(DbName) -> 184 | Parent = self(), 185 | Loop = spawn_link(fun() -> wait_loop(Parent, DbName) end), 186 | receive 187 | {done, Loop} -> 188 | etap:diag("Spatial compaction has finished") 189 | after 60000 -> 190 | etap:bail("Compaction not triggered") 191 | end. 192 | 193 | wait_loop(Parent, DbName) -> 194 | DdocName = <<"_design/", (ddoc_name())/binary>>, 195 | {ok, SpatialInfo} = couch_spatial:get_group_info(DbName, DdocName), 196 | case couch_util:get_value(compact_running, SpatialInfo) =:= true of 197 | false -> 198 | Parent ! {done, self()}; 199 | true -> 200 | ok = timer:sleep(500), 201 | wait_loop(Parent, DbName) 202 | end. 203 | 204 | 205 | get_spatial_size(DbName) -> 206 | DdocName = <<"_design/", (ddoc_name())/binary>>, 207 | {ok, SpatialInfo} = couch_spatial:get_group_info(DbName, DdocName), 208 | couch_util:get_value(disk_size, SpatialInfo). 209 | -------------------------------------------------------------------------------- /src/geocouch/couch_spatial.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. 
You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(couch_spatial). 14 | -behaviour(gen_server). 15 | 16 | -export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, 17 | terminate/2, code_change/3]). 18 | -export([fold/5]). 19 | % For List functions 20 | -export([get_spatial_index/4]). 21 | % For compactor 22 | -export([get_group_server/2, get_item_count/2]). 23 | % For _spatialinfo 24 | -export([get_group_info/2]). 25 | % For _spatial_cleanup 26 | -export([cleanup_index_files/1]). 27 | 28 | 29 | -include("couch_db.hrl"). 30 | -include("couch_spatial.hrl"). 31 | 32 | 33 | start_link() -> 34 | ?LOG_DEBUG("Spatial daemon: starting link.", []), 35 | gen_server:start_link({local, couch_spatial}, couch_spatial, [], []). 36 | 37 | init([]) -> 38 | RootDir = couch_config:get("couchdb", "view_index_dir"), 39 | ets:new(couch_spatial_groups_by_db, [bag, private, named_table]), 40 | ets:new(spatial_group_servers_by_sig, [set, protected, named_table]), 41 | ets:new(couch_spatial_groups_by_updater, [set, private, named_table]), 42 | process_flag(trap_exit, true), 43 | {ok, #spatial{root_dir=RootDir}}. 44 | 45 | add_to_ets(Pid, DbName, Sig) -> 46 | true = ets:insert(couch_spatial_groups_by_updater, {Pid, {DbName, Sig}}), 47 | true = ets:insert(spatial_group_servers_by_sig, {{DbName, Sig}, Pid}), 48 | true = ets:insert(couch_spatial_groups_by_db, {DbName, Sig}). 
49 | 50 | 51 | delete_from_ets(Pid, DbName, Sig) -> 52 | true = ets:delete(couch_spatial_groups_by_updater, Pid), 53 | true = ets:delete(spatial_group_servers_by_sig, {DbName, Sig}), 54 | true = ets:delete_object(couch_spatial_groups_by_db, {DbName, Sig}). 55 | 56 | % For foreign Design Documents (stored in a different DB) 57 | get_group_server({DbName, GroupDbName}, GroupId) when is_binary(GroupId) -> 58 | DbGroup = case GroupId of 59 | <> -> 60 | open_db_group(GroupDbName, GroupId); 61 | _ -> 62 | open_db_group(GroupDbName, <>) 63 | end, 64 | get_group_server(DbName, DbGroup); 65 | get_group_server(DbName, GroupId) when is_binary(GroupId) -> 66 | Group = open_db_group(DbName, GroupId), 67 | get_group_server(DbName, Group); 68 | get_group_server(DbName, Group) -> 69 | case gen_server:call(couch_spatial, {get_group_server, DbName, Group}, infinity) of 70 | {ok, Pid} -> 71 | Pid; 72 | Error -> 73 | throw(Error) 74 | end. 75 | 76 | open_db_group(DbName, GroupId) -> 77 | case couch_spatial_group:open_db_group(DbName, GroupId) of 78 | {ok, Group} -> 79 | Group; 80 | Error -> 81 | throw(Error) 82 | end. 83 | 84 | get_group(Db, {GroupDb, GroupId}, Stale) -> 85 | DbGroup = open_db_group(couch_db:name(GroupDb), GroupId), 86 | do_get_group(Db, DbGroup, Stale); 87 | get_group(Db, GroupId, Stale) -> 88 | DbGroup = open_db_group(couch_db:name(Db), GroupId), 89 | do_get_group(Db, DbGroup, Stale). 90 | 91 | 92 | do_get_group(Db, GroupId, Stale) -> 93 | MinUpdateSeq = case Stale of 94 | ok -> 0; 95 | update_after -> 0; 96 | _Else -> couch_db:get_update_seq(Db) 97 | end, 98 | GroupPid = get_group_server(couch_db:name(Db), GroupId), 99 | Result = couch_spatial_group:request_group(GroupPid, MinUpdateSeq), 100 | case Stale of 101 | update_after -> 102 | % best effort, process might die 103 | spawn(fun() -> 104 | LastSeq = couch_db:get_update_seq(Db), 105 | couch_spatial_group:request_group(GroupPid, LastSeq) 106 | end); 107 | _ -> 108 | ok 109 | end, 110 | Result. 
%% Fetch spatial group info, either for a design doc stored in a foreign
%% database ({DbName, GroupDbName}) or in the data database itself.
get_group_info({DbName, GroupDbName}, GroupId) ->
    GroupPid = get_group_server({DbName, GroupDbName}, GroupId),
    couch_view_group:request_group_info(GroupPid);
get_group_info(#db{name = DbName}, GroupId) ->
    get_group_info(DbName, GroupId);
get_group_info(DbName, GroupId) ->
    couch_view_group:request_group_info(get_group_server(DbName, GroupId)).


%% Delete spatial index files that no longer belong to any non-deleted
%% design document of Db.
% The only reason why couch_view:cleanup_index_files can't be used is the
% call to get_group_info
cleanup_index_files(Db) ->
    % load all ddocs
    {ok, DesignDocs} = couch_db:get_design_docs(Db),
    % make unique list of group sigs; the comprehension pattern skips
    % deleted design documents
    ActiveSigs = [begin
        {ok, Info} = get_group_info(Db, DocId),
        ?b2l(couch_util:get_value(signature, Info))
    end || #doc{id = DocId, deleted = false} <- DesignDocs],
    FileList = list_index_files(Db),
    % regex that matches all ddocs
    RegExp = "(" ++ string:join(ActiveSigs, "|") ++ ")",
    % filter out the ones in use
    DeleteFiles = lists:filter(fun(FilePath) ->
        re:run(FilePath, RegExp, [{capture, none}]) =:= nomatch
    end, FileList),
    % delete unused files
    ?LOG_DEBUG("deleting unused view index files: ~p",[DeleteFiles]),
    RootDir = couch_config:get("couchdb", "view_index_dir"),
    lists:foreach(fun(File) ->
        couch_file:delete(RootDir, File, false)
    end, DeleteFiles),
    ok.

%% Recursively remove the index directory of the given database.
delete_index_dir(RootDir, DbName) ->
    IndexDir = RootDir ++ "/." ++ ?b2l(DbName) ++ "_design",
    couch_view:nuke_dir(IndexDir).

%% Return the paths of all spatial index files belonging to Db.
list_index_files(Db) ->
    % call server to fetch the index files
    RootDir = couch_config:get("couchdb", "view_index_dir"),
    Pattern = RootDir ++ "/." ++ ?b2l(couch_db:name(Db)) ++
        "_design" ++ "/*.spatial",
    filelib:wildcard(Pattern).


% XXX NOTE vmx: I don't know when this case happens
do_reset_indexes(DbName, Root) ->
    % shutdown all the updaters and clear the files, the db got changed
    Groups = ets:lookup(couch_spatial_groups_by_db, DbName),
    lists:foreach(
        fun({_DbName, Sig}) ->
            ?LOG_DEBUG("Killing update process for spatial group ~s. in database ~s.", [Sig, DbName]),
            [{_, GroupPid}] =
                ets:lookup(spatial_group_servers_by_sig, {DbName, Sig}),
            exit(GroupPid, kill),
            % This server traps exits, so wait for the kill to land
            % before dropping the ETS entries.
            receive {'EXIT', GroupPid, _} ->
                delete_from_ets(GroupPid, DbName, Sig)
            end
        end, Groups),
    delete_index_dir(Root, DbName),
    file:delete(Root ++ "/." ++ ?b2l(DbName) ++ "_temp").

% counterpart in couch_view is get_map_view/4
%% Look up the spatial index called Name within the design doc GroupId.
%% Returns {ok, Index, Group}, {not_found, missing_named_index} or an error.
get_spatial_index(Db, GroupId, Name, Stale) ->
    case get_group(Db, GroupId, Stale) of
    {ok, #spatial_group{indexes = Indexes} = Group} ->
        case get_spatial_index0(Name, Indexes) of
        {ok, Index} -> {ok, Index, Group};
        NotFound -> NotFound
        end;
    Error ->
        Error
    end.

%% Scan the index list for one whose index_names contains Name.
get_spatial_index0(_Name, []) ->
    {not_found, missing_named_index};
get_spatial_index0(Name, [#spatial{index_names = IndexNames} = Index | Rest]) ->
    % NOTE vmx: I don't understand why need lists:member and recursion
    case lists:member(Name, IndexNames) of
    true -> {ok, Index};
    false -> get_spatial_index0(Name, Rest)
    end.


terminate(_Reason, _Srv) ->
    ok.
% gen_server callback: look up (or spawn) the group server for a design
% document. Servers are cached in the spatial_group_servers_by_sig ETS
% table keyed by {DbName, Sig}, so at most one server per group runs.
handle_call({get_group_server, DbName,
    #spatial_group{name=GroupId,sig=Sig}=Group}, _From,
    #spatial{root_dir=Root}=Server) ->
    case ets:lookup(spatial_group_servers_by_sig, {DbName, Sig}) of
    [] ->
        ?LOG_DEBUG("Spawning new group server for spatial group ~s in database ~s.",
            [GroupId, DbName]),
        case (catch couch_spatial_group:start_link({Root, DbName, Group})) of
        {ok, NewPid} ->
            add_to_ets(NewPid, DbName, Sig),
            {reply, {ok, NewPid}, Server};
        {error, invalid_view_seq} ->
            % the on-disk index is out of sync with the database: wipe
            % the index files and retry the start exactly once
            do_reset_indexes(DbName, Root),
            case (catch couch_spatial_group:start_link({Root, DbName, Group})) of
            {ok, NewPid} ->
                add_to_ets(NewPid, DbName, Sig),
                {reply, {ok, NewPid}, Server};
            Error ->
                {reply, Error, Server}
            end;
        Error ->
            {reply, Error, Server}
        end;
    [{_, ExistingPid}] ->
        {reply, {ok, ExistingPid}, Server}
    end.


% No casts are used by this server; the clause only completes the
% gen_server callback set.
handle_cast(foo,State) ->
    {noreply, State}.

% Cleanup on exit, e.g. resetting the group information stored in ETS tables
handle_info({'EXIT', FromPid, Reason}, Server) ->
    case ets:lookup(couch_spatial_groups_by_updater, FromPid) of
    [] ->
        if Reason /= normal ->
            % non-updater linked process died, we propagate the error
            ?LOG_ERROR("Exit on non-updater process: ~p", [Reason]),
            exit(Reason);
        true -> ok
        end;
    [{_, {DbName, GroupId}}] ->
        % a known updater went down: drop its bookkeeping entries
        delete_from_ets(FromPid, DbName, GroupId)
    end,
    {noreply, Server};

handle_info(_Msg, Server) ->
    {noreply, Server}.

% No state migration needed between code versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
% counterpart in couch_view is fold/4
% Fold `FoldFun` over every vtree entry intersecting `Bbox` (optionally
% wrapped around `Bounds`). Duplicate-carrying nodes are expanded via
% couch_view:expand_dups/2 before the user fun sees them.
fold(Index, FoldFun, InitAcc, Bbox, Bounds) ->
    WrapperFun = fun(Node, Acc) ->
        Expanded = couch_view:expand_dups([Node], []),
        lists:foldl(fun(E, {ok, Acc2}) ->
            FoldFun(E, Acc2)
        end, {ok, Acc}, Expanded)
    end,
    {_State, Acc} = vtree:lookup(
        Index#spatial.fd, Index#spatial.treepos, Bbox,
        {WrapperFun, InitAcc}, Bounds),
    {ok, Acc}.

% counterpart in couch_view is get_row_count/1
% Total number of entries stored in the vtree rooted at TreePos.
get_item_count(Fd, TreePos) ->
    Count = vtree:count_total(Fd, TreePos),
    {ok, Count}.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Welcome to the world of GeoCouch
================================

GeoCouch is a spatial extension for Apache CouchDB and Couchbase.

Prerequisites
-------------

A working installation of CouchDB with corresponding source
code. GeoCouch works best with Couchbase and the latest stable releases of
CouchDB (should be >= 1.1.0).

### Understanding the branches:

This repository contains several branches, please make sure you use
the correct one:

- master: works with the CouchDB master branch from Couchbase's repo
  (https://github.com/couchbase/couchdb)
- couchdb1.1.x: works with Apache CouchDB 1.1.x
- couchdb1.2.x: works with Apache CouchDB 1.2.x
- there is currently no branch for Apache CouchDB 1.3.x

Installation
------------

### Get GeoCouch:

    git clone https://github.com/couchbase/geocouch.git
    cd geocouch

### Compilation

Note: Always replace `` with the path to your CouchDB
source and `` with the location of the GeoCouch source.
Set the `COUCH_SRC` environment variable to the directory that contains the
CouchDB core source (`<vanilla-couch>/src/couchdb/`).

    export COUCH_SRC=<vanilla-couch>/src/couchdb

Run "make" in your `<geocouch>` directory

    make

Copy the configuration file for GeoCouch from
`<geocouch>/etc/couchdb/default.d/` to
`<vanilla-couch>/etc/couchdb/default.d/`

    cp <geocouch>/etc/couchdb/default.d/geocouch.ini <vanilla-couch>/etc/couchdb/default.d/

### Futon tests

To make sure your installation is working also copy the Futon tests
over (from `<geocouch>/share/www/script/test` to
`<vanilla-couch>/share/www/script/test`):

    cp <geocouch>/share/www/script/test/* <vanilla-couch>/share/www/script/test/

Add the test to `<vanilla-couch>/share/www/script/couch_tests.js`

    loadTest("spatial.js");
    loadTest("list_spatial.js");
    loadTest("etags_spatial.js");
    loadTest("multiple_spatial_rows.js");
    loadTest("spatial_compaction.js");
    loadTest("spatial_design_docs.js");
    loadTest("spatial_bugfixes.js");
    loadTest("spatial_merging.js");
    loadTest("spatial_offsets.js");

### Run CouchDB with GeoCouch

The compiled beam files from GeoCouch need to be in Erlang's path,
which can be set with the `ERL_FLAGS` environment variable:

    export ERL_FLAGS="-pa <geocouch>/ebin"

If you run a dev instance with CouchDB's `./utils/run` you can also
define it on startup:

    ERL_FLAGS="-pa <geocouch>/ebin" <vanilla-couch>/utils/run


Using GeoCouch
--------------

Create a database:

    curl -X PUT http://127.0.0.1:5984/places

Add a Design Document with a spatial function:

    curl -X PUT -d '{"spatial":{"points":"function(doc) {\n if (doc.loc) {\n emit({\n type: \"Point\",\n coordinates: [doc.loc[0], doc.loc[1]]\n }, [doc._id, doc.loc]);\n }};"}}' http://127.0.0.1:5984/places/_design/main

Put some data into it:

    curl -X PUT -d '{"loc": [-122.270833, 37.804444]}' http://127.0.0.1:5984/places/oakland
    curl -X PUT -d '{"loc":
[10.898333, 48.371667]}' http://127.0.0.1:5984/places/augsburg

Make a bounding box request:

    curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=0,0,180,90'

It should return:

    {"update_seq":3,"rows":[
    {"id":"augsburg","bbox":[10.898333,48.371667,10.898333,48.371667],"geometry":{"type":"Point","coordinates":[10.898333,48.371667]},"value":["augsburg",[10.898333,48.371667]]}
    ]}

The Design Document Function
----------------------------

    function(doc) {
        if (doc.loc) {
            emit({
                type: "Point",
                coordinates: [doc.loc[0], doc.loc[1]]
            }, [doc._id, doc.loc]);
        }};"

It uses the emit() from normal views. The key is a
[GeoJSON](http://geojson.org) geometry, the value is any arbitrary JSON. All
geometry types (even GeometryCollections) are supported.

If the GeoJSON geometry contains a `bbox` property it will be used instead
of calculating it from the geometry (even if it's wrong, i.e. is not
the actual bounding box).


Bounding box search and the date line
-------------------------------------

A common problem when performing bounding box searches is the date
line/poles. As the bounding box follows the GeoJSON specification,
where the first two numbers are the lower left coordinate, the last
two numbers the upper right coordinate, it is easy to map it over the
date line/poles. The lower coordinate would have a higher value than
the upper one. Such a bounding box seems invalid at first
glance, but isn't. For example a bounding box like `110,-60,-30,15`
would include Australia and South America, but not Africa.

GeoCouch operates on a plane and doesn't perform spherical
calculations. Therefore the bounds of the plane need to be set
explicitly with the `plane_bounds` parameter.
If bounding boxes are
flipped, a search across those bounds will be performed
automatically. Give it a try (with the same Design Document as
above). Insert some Documents:

    curl -X PUT -d '{"loc": [17.15, -22.566667]}' http://127.0.0.1:5984/places/namibia
    curl -X PUT -d '{"loc": [135, -25]}' http://127.0.0.1:5984/places/australia
    curl -X PUT -d '{"loc": [-52.95, -10.65]}' http://127.0.0.1:5984/places/brasilia

And request only Australia and Brasilia:

    curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=110,-60,-30,15&plane_bounds=-180,-90,180,90'

The result is as expected:

    {"update_seq":6,"rows":[
    {"id":"australia","bbox":[135,-25,135,-25],"geometry":{"type":"Point","coordinates":[135,-25]},"value":["australia",[135,-25]]},
    {"id":"brasilia","bbox":[-52.95,-10.65,-52.95,-10.65],"geometry":{"type":"Point","coordinates":[-52.95,-10.65]},"value":["brasilia",[-52.95,-10.65]]}
    ]}

The bounding box with the same numbers, but different order
(`-30,-60,110,15`) would only return Namibia:

    curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=-30,-60,110,15&plane_bounds=-180,-90,180,90'

    {"update_seq":6,"rows":[
    {"id":"namibia","bbox":[17.15,-22.566667,17.15,-22.566667],"geometry":{"type":"Point","coordinates":[17.15,-22.566667]},"value":["namibia",[17.15,-22.566667]]}
    ]}

List function support
---------------------

GeoCouch supports List functions just as CouchDB does for Views. This way
you can output any arbitrary format, e.g. GeoRSS.

As an example we output the points as WKT. Add a new Design Document
with an additional List function (the rest is the same as above).
Make 182 | sure you use the right `_rev`: 183 | 184 | curl -X PUT -d '{"_rev": "1-121efc747b00743b8c7621ffccf1ac40", "lists": {"wkt": "function(head, req) {\n var row;\n while (row = getRow()) {\n send(\"POINT(\" + row.geometry.coordinates.join(\" \") + \")\\n\");\n }\n};"}, "spatial":{"points":"function(doc) {\n if (doc.loc) {\n emit({\n type: \"Point\",\n coordinates: [doc.loc[0], doc.loc[1]]\n }, [doc._id, doc.loc]);\n }};"}}' http://127.0.0.1:5984/places/_design/main 185 | 186 | Now you can request this List function as you would do for CouchDB, 187 | though with a different Design handler (`_spatial/_list` instead of 188 | `_list` ): 189 | 190 | curl -X GET 'http://localhost:5984/places/_design/main/_spatial/_list/wkt/points?bbox=-180,-90,180,90' 191 | 192 | The result is: 193 | 194 | POINT(10.898333 48.371667) 195 | POINT(-122.270833 37.804444) 196 | POINT(17.15 -22.566667) 197 | POINT(135 -25) 198 | POINT(-52.95 -10.65) 199 | 200 | Using List functions from Design Documents other than the one containing the 201 | Spatial functions is supported as well. This time we add the Document 202 | ID in parenthesis: 203 | 204 | curl -X PUT -d '{"lists": {"wkt": "function(head, req) {\n var row;\n while (row = getRow()) {\n send(\"POINT(\" + row.geometry.coordinates.join(\" \") + \") (\" + row.id + \")\\n\");\n }\n};"}}' http://127.0.0.1:5984/places/_design/listfunonly 205 | 206 | curl -X GET 'http://localhost:5984/places/_design/listfunonly/_spatial/_list/wkt/main/points?bbox=-180,-90,180,90' 207 | 208 | 209 | Other supported query arguments 210 | ------------------------------- 211 | 212 | ### stale ### 213 | `stale=ok` is supported. The spatial index won't be rebuilt even if 214 | new Documents were added. It works for normal spatial queries as well 215 | as for the spatial List functions. 216 | 217 | ### count ### 218 | `count` is a boolean. `count=true` will only return the number of geometries 219 | the query will return, not the geometry themselves. 
220 | 221 | curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=0,0,180,90&count=true' 222 | 223 | {"count":1} 224 | 225 | ### limit ### 226 | With `limit` you can limit the number of rows that should be returned. 227 | 228 | curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=-180,-90,180,90&limit=2' 229 | 230 | {"update_seq":8,"rows":[ 231 | {"id":"augsburg","bbox":[10.898333,48.371667,10.898333,48.371667],"geometry":{"type":"Point","coordinates":[10.898333,48.371667]},"value":["augsburg",[10.898333,48.371667]]}, 232 | {"id":"oakland","bbox":[-122.270833,37.804444,-122.270833,37.804444],"geometry":{"type":"Point","coordinates":[-122.270833,37.804444]},"value":["oakland",[-122.270833,37.804444]]} 233 | ]} 234 | 235 | ### skip ### 236 | With `skip` you start to return the results at a certain offset. 237 | 238 | curl -X GET 'http://localhost:5984/places/_design/main/_spatial/points?bbox=-180,-90,180,90&skip=3' 239 | 240 | {"update_seq":8,"rows":[ 241 | {"id":"australia","bbox":[135,-25,135,-25],"geometry":{"type":"Point","coordinates":[135,-25]},"value":["australia",[135,-25]]}, 242 | {"id":"brasilia","bbox":[-52.95,-10.65,-52.95,-10.65],"geometry":{"type":"Point","coordinates":[-52.95,-10.65]},"value":["brasilia",[-52.95,-10.65]]} 243 | ]} 244 | 245 | 246 | Compaction, cleanup and info 247 | ---------------------------- 248 | 249 | The API of GeoCouch's spatial indexes is similar to the one for the 250 | Views. 
Compaction of spatial indexes is per Design Document, thus:

    curl -X POST 'http://localhost:5984/places/_design/main/_spatial/_compact' -H 'Content-Type: application/json'

To cleanup spatial indexes that are no longer in use (this is per database):

    curl -X POST 'http://localhost:5984/places/_spatial_cleanup' -H 'Content-Type: application/json'

To get information about the spatial indexes of a certain Design
Document use the `_info` handler:

    curl -X GET 'http://localhost:5984/places/_design/main/_spatial/_info'
--------------------------------------------------------------------------------
/src/geocouch/couch_httpd_spatial.erl:
--------------------------------------------------------------------------------
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_httpd_spatial).
-include("couch_db.hrl").
-include("couch_spatial.hrl").

-export([handle_spatial_req/3, spatial_etag/3, spatial_etag/4,
    load_index/3, handle_compact_req/3, handle_design_info_req/3,
    handle_spatial_cleanup_req/2, parse_spatial_params/1,
    make_spatial_fold_funs/6]).

-import(couch_httpd,
    [send_json/2, send_json/3, send_method_not_allowed/2, send_chunk/2,
    start_json_response/2, start_json_response/3, end_json_response/1]).
% Either answer a normal spatial query, or keep dispatching if the path part
% after _spatial starts with an underscore.
handle_spatial_req(#httpd{
        path_parts=[_, _, _Dname, _, SpatialName|_]}=Req, Db, DDoc) ->
    case SpatialName of
    % the path after _spatial starts with an underscore => dispatch
    <<$_,_/binary>> ->
        dispatch_sub_spatial_req(Req, Db, DDoc);
    _ ->
        handle_spatial(Req, Db, DDoc)
    end.

% the dispatching of endpoints below _spatial needs to be done manually.
% The handler fun is configured in the "httpd_design_handlers" section
% under the key "<_spatial>/<endpoint>", e.g. "_spatial/_list" (see
% etc/couchdb/default.d/geocouch.ini). The binary construction of that
% key was garbled in this copy (`?b2l(<>)`) and is restored here.
dispatch_sub_spatial_req(#httpd{
        path_parts=[_, _, _DName, Spatial, SpatialDisp|_]}=Req,
        Db, DDoc) ->
    Conf = couch_config:get("httpd_design_handlers",
        ?b2l(<<Spatial/binary, "/", SpatialDisp/binary>>)),
    Fun = geocouch_duplicates:make_arity_3_fun(Conf),
    apply(Fun, [Req, Db, DDoc]).

% GET /db/_design/Name/_spatial/IndexName: run a spatial query.
handle_spatial(#httpd{method='GET',
        path_parts=[_, _, DName, _, SpatialName]}=Req, Db, DDoc) ->
    ?LOG_DEBUG("Spatial query (~p): ~n~p", [DName, DDoc#doc.id]),
    #spatial_query_args{
        stale = Stale
    } = QueryArgs = parse_spatial_params(Req),
    {ok, Index, Group} = couch_spatial:get_spatial_index(
        Db, DDoc#doc.id, SpatialName, Stale),
    output_spatial_index(Req, Index, Group, Db, QueryArgs);
handle_spatial(Req, _Db, _DDoc) ->
    send_method_not_allowed(Req, "GET,HEAD").

% pendant is in couch_httpd_db
% POST /db/_design/Name/_spatial/_compact: compact the spatial indexes of
% one design document. Admin only.
handle_compact_req(#httpd{method='POST',
        path_parts=[DbName, _ , DName|_]}=Req, Db, _DDoc) ->
    ok = couch_db:check_is_admin(Db),
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_spatial_compactor:start_compact(DbName, DName),
    send_json(Req, 202, {[{ok, true}]});
handle_compact_req(Req, _Db, _DDoc) ->
    send_method_not_allowed(Req, "POST").
% pendant is in couch_httpd_db
% POST /db/_spatial_cleanup: delete index files that no design document
% references any longer. Admin only.
handle_spatial_cleanup_req(#httpd{method='POST'}=Req, Db) ->
    ok = couch_db:check_is_admin(Db),
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_spatial:cleanup_index_files(Db),
    send_json(Req, 202, {[{ok, true}]});

handle_spatial_cleanup_req(Req, _Db) ->
    send_method_not_allowed(Req, "POST").

% pendant is in couch_httpd_db
% GET /db/_design/Name/_spatial/_info: return the group info of the
% design document's spatial indexes.
handle_design_info_req(#httpd{
        method='GET',
        path_parts=[_DbName, _Design, DesignName, _, _]
    }=Req, Db, _DDoc) ->
    {ok, Info} = couch_spatial:get_group_info(
        Db, <<"_design/", DesignName/binary>>),
    Body = {[
        {name, DesignName},
        {spatial_index, {Info}}
    ]},
    send_json(Req, 200, Body);
handle_design_info_req(Req, _Db, _DDoc) ->
    send_method_not_allowed(Req, "GET").


% Resolve the spatial index named in a list-function request. Throws
% {not_found, Reason} when the index does not exist.
load_index(Req, Db, {DesignId, SpatialName}) ->
    #spatial_query_args{stale = Stale} = Args = parse_spatial_params(Req),
    case couch_spatial:get_spatial_index(Db, DesignId, SpatialName, Stale) of
    {ok, Index, Group} ->
        {ok, Index, Group, Args};
    {not_found, Reason} ->
        throw({not_found, Reason})
    end.
% count=true: only report how many geometries the query would return,
% computed directly on the vtree without streaming any rows.
output_spatial_index(Req, Index, Group, _Db,
        #spatial_query_args{count=true, bbox=Bbox}) ->
    Count = vtree:count_lookup(
        Group#spatial_group.fd, Index#spatial.treepos, Bbox),
    send_json(Req, {[{"count",Count}]});

% counterpart in couch_httpd_view is output_map_view/6
% Stream all rows intersecting the bounding box as JSON, guarded by an
% ETag so unchanged indexes can be answered from the client cache.
output_spatial_index(Req, Index, Group, Db, QueryArgs) ->
    #spatial_query_args{
        bbox = Bbox,
        bounds = Bounds,
        limit = Limit,
        skip = SkipCount
    } = QueryArgs,
    Etag = spatial_etag(Db, Group, Index),
    Helpers = #spatial_fold_helper_funs{
        start_response = fun json_spatial_start_resp/3,
        send_row = fun send_json_spatial_row/3
    },
    couch_httpd:etag_respond(Req, Etag, fun() ->
        FoldFun = make_spatial_fold_funs(
            Req, QueryArgs, Etag, Db,
            Group#spatial_group.current_seq, Helpers),
        % Accumulator: {rows left, rows to skip, response (undefined until
        % the first row is sent), prefix for the next row}. Only the
        % response part matters here; the last element is used by spatial
        % list functions.
        {ok, {_, _, Resp, _}} = couch_spatial:fold(
            Index, FoldFun, {Limit, SkipCount, undefined, ""}, Bbox, Bounds),
        finish_spatial_fold(Req, Resp)
    end).
% counterpart in couch_httpd_view is make_view_fold/7
% Build the fold function that implements skip/limit handling and lazily
% starts the HTTP response when the first row is emitted. Accumulator:
% {RowsStillAllowed, RowsStillToSkip, RespOrUndefined, PendingRowPrefix}.
make_spatial_fold_funs(Req, _QueryArgs, Etag, _Db, UpdateSeq, HelperFuns) ->
    #spatial_fold_helper_funs{
        start_response = StartRespFun,
        send_row = SendRowFun
    } = HelperFuns,
    % The Acc is there to output characters that belong to the previous line,
    % but only if one line follows (think of a comma separated list which
    % doesn't have a comma at the last item)
    fun({{Bbox, DocId}, {Geom, Value}}, {AccLimit, AccSkip, Resp, Acc}) ->
        case {AccLimit, AccSkip, Resp} of
        {0, _, _} ->
            % we've done "limit" rows, stop foldling
            {stop, {0, 0, Resp, Acc}};
        {_, AccSkip, _} when AccSkip > 0 ->
            % just keep skipping
            {ok, {AccLimit, AccSkip - 1, Resp, Acc}};
        {_, _, undefined} ->
            % rendering the first row, first we start the response
            {ok, Resp2, BeginBody} = StartRespFun(Req, Etag, UpdateSeq),
            {Go, Acc2} = SendRowFun(
                Resp2, {{Bbox, DocId}, {Geom, Value}}, BeginBody),
            {Go, {AccLimit - 1, 0, Resp2, Acc2}};
        {AccLimit, _, Resp} when (AccLimit > 0) ->
            % rendering all other rows
            {Go, Acc2} = SendRowFun(Resp, {{Bbox, DocId}, {Geom, Value}}, Acc),
            {Go, {AccLimit - 1, 0, Resp, Acc2}}
        end
    end.

% counterpart in couch_httpd_view is finish_view_fold/5
% Close the JSON response; if no row was ever sent, reply with an empty
% row list instead.
finish_spatial_fold(Req, Resp) ->
    case Resp of
    % no response was sent yet
    undefined ->
        send_json(Req, 200, {[{"rows", []}]});
    Resp ->
        % end the index
        send_chunk(Resp, "\r\n]}"),
        end_json_response(Resp)
    end.

% counterpart in couch_httpd_view is json_view_start_resp/6
% Start the chunked JSON response and return the opening of its body.
json_spatial_start_resp(Req, Etag, UpdateSeq) ->
    {ok, Resp} = start_json_response(Req, 200, [{"Etag", Etag}]),
    BeginBody = io_lib:format(
        "{\"update_seq\":~w,\"rows\":[\r\n", [UpdateSeq]),
    {ok, Resp, BeginBody}.
% counterpart in couch_httpd_view is send_json_view_row/5
% Stream one row as JSON; returns the prefix (",\r\n") that must precede
% a possible next row.
send_json_spatial_row(Resp, {{Bbox, DocId}, {Geom, Value}}, RowFront) ->
    Row = {[
        {<<"id">>, DocId},
        {<<"bbox">>, erlang:tuple_to_list(Bbox)},
        {<<"geometry">>, couch_spatial_updater:geocouch_to_geojsongeom(Geom)},
        {<<"value">>, Value}
    ]},
    send_chunk(Resp, RowFront ++ ?JSON_ENCODE(Row)),
    {ok, ",\r\n"}.

% counterpart in couch_httpd_view is view_group_etag/3 resp. /4
% The ETag depends only on the group signature plus the index' update and
% purge sequences, so an unchanged index yields a cache hit.
spatial_etag(Db, Group, Index) ->
    spatial_etag(Db, Group, Index, nil).

spatial_etag(_Db, #spatial_group{sig=Sig},
        #spatial{update_seq=UpdateSeq, purge_seq=PurgeSeq}, Extra) ->
    couch_httpd:make_etag({Sig, UpdateSeq, PurgeSeq, Extra}).

% Parse and validate all query string parameters of the request into a
% #spatial_query_args{} record. Throws {query_parse_error, _} when the
% bounding box is flipped but no plane bounds were given.
parse_spatial_params(Req) ->
    RawParams = lists:foldl(fun({Key, Val}, Acc) ->
        parse_spatial_param(Key, Val) ++ Acc
    end, [], couch_httpd:qs(Req)),
    Args = lists:foldl(fun({Key, Val}, Acc) ->
        validate_spatial_query(Key, Val, Acc)
    end, #spatial_query_args{}, lists:reverse(RawParams)),

    #spatial_query_args{bbox = Bbox, bounds = Bounds} = Args,
    case {Bbox, Bounds} of
    % Coordinates of the bounding box are flipped and no bounds for the
    % cartesian plane were set
    {{W, S, E, N}, nil} when E < W; N < S ->
        Msg = <<"Coordinates of the bounding box are flipped, but no bounds "
            "for the cartesian plane were specified "
            "(use the `plane_bounds` parameter)">>,
        throw({query_parse_error, Msg});
    _ ->
        Args
    end.
% Translate one raw query string parameter into a list of {atom, Value}
% pairs. Unknown parameters fall through as {extra, _}; invalid values
% raise {query_parse_error, _}.
parse_spatial_param("bbox", Bbox) ->
    [{bbox, list_to_tuple(?JSON_DECODE("[" ++ Bbox ++ "]"))}];
parse_spatial_param("stale", "ok") ->
    [{stale, ok}];
parse_spatial_param("stale", "update_after") ->
    [{stale, update_after}];
parse_spatial_param("stale", _Value) ->
    throw({query_parse_error,
        <<"stale only available as stale=ok or as stale=update_after">>});
parse_spatial_param("count", "true") ->
    [{count, true}];
parse_spatial_param("count", _Value) ->
    throw({query_parse_error, <<"count only available as count=true">>});
parse_spatial_param("plane_bounds", Bounds) ->
    [{bounds, list_to_tuple(?JSON_DECODE("[" ++ Bounds ++ "]"))}];
parse_spatial_param("limit", Value) ->
    [{limit, geocouch_duplicates:parse_positive_int_param(Value)}];
parse_spatial_param("skip", Value) ->
    [{skip, geocouch_duplicates:parse_int_param(Value)}];
parse_spatial_param(Key, Value) ->
    % unknown parameters are kept so e.g. list functions can read them
    [{extra, {Key, Value}}].

% Fold one parsed parameter into the #spatial_query_args{} record.
% Note the catch-all stale clause: any stale value other than ok or
% update_after leaves the record untouched.
validate_spatial_query(bbox, Value, Args) ->
    Args#spatial_query_args{bbox=Value};
validate_spatial_query(stale, ok, Args) ->
    Args#spatial_query_args{stale=ok};
validate_spatial_query(stale, update_after, Args) ->
    Args#spatial_query_args{stale=update_after};
validate_spatial_query(stale, _, Args) ->
    Args;
validate_spatial_query(count, true, Args) ->
    Args#spatial_query_args{count=true};
validate_spatial_query(bounds, Value, Args) ->
    Args#spatial_query_args{bounds=Value};
validate_spatial_query(limit, Value, Args) ->
    Args#spatial_query_args{limit=Value};
validate_spatial_query(skip, Value, Args) ->
    Args#spatial_query_args{skip=Value};
validate_spatial_query(extra, _Value, Args) ->
    Args.
--------------------------------------------------------------------------------
/src/geocouch/couch_spatial_merger.erl:
--------------------------------------------------------------------------------
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_spatial_merger).

%-export([query_spatial/2]).
-export([parse_http_params/4, make_funs/3, get_skip_and_limit/1,
    http_index_folder_req_details/3, make_event_fun/2]).

-include("couch_db.hrl").
-include_lib("couch_index_merger/include/couch_index_merger.hrl").
-include("couch_spatial.hrl").

-define(LOCAL, <<"local">>).

% callback!
% couch_index_merger callback: parse the spatial query parameters of the
% incoming merge request.
parse_http_params(Req, _DDoc, _IndexName, _Extra) ->
    couch_httpd_spatial:parse_spatial_params(Req).

% callback!
% couch_index_merger callback: the function set the generic merger uses
% to order, fold and merge spatial rows (less fun, folder, merger, and a
% row-collector factory; the final nil is the extra state slot).
make_funs(_DDoc, _IndexName, _IndexMergeParams) ->
    {fun spatial_less_fun/2,
    fun spatial_folder/6,
    fun merge_spatial/1,
    fun(NumFolders, Callback, UserAcc) ->
        fun(Item) ->
            couch_index_merger:collect_row_count(
                NumFolders, 0, fun spatial_row_obj/1, Callback, UserAcc, Item)
        end
    end,
    nil}.

% callback!
% couch_index_merger callback: extract skip/limit from the parsed args.
get_skip_and_limit(#spatial_query_args{skip=Skip, limit=Limit}) ->
    {Skip, Limit}.

% callback!
% couch_index_merger callback: JSON stream event handler used while
% parsing a remote node's response.
make_event_fun(_SpatialArgs, Queue) ->
    fun(Ev) ->
        http_spatial_fold(Ev, Queue)
    end.

% callback!
% couch_index_merger callback: describe the HTTP request that folds a
% remote index. First clause: the remote side is itself a merge, so the
% merge spec is POSTed on.
http_index_folder_req_details(#merged_index_spec{} = Spec, MergeParams, DDoc) ->
    #merged_index_spec{
        url = MergeUrl0,
        ejson_spec = {EJson}
    } = Spec,
    #index_merge{
        conn_timeout = Timeout,
        http_params = SpatialArgs
    } = MergeParams,
    {ok, #httpdb{url = Url, lhttpc_options = Options} = Db} =
        couch_index_merger:open_db(MergeUrl0, nil, Timeout),
    MergeUrl = Url ++ spatial_qs(SpatialArgs),
    Headers = [{"Content-Type", "application/json"} | Db#httpdb.headers],

    % attach our ddoc revision (replacing any existing one) so the remote
    % side can detect a design document mismatch
    EJson2 = case couch_index_merger:should_check_rev(MergeParams, DDoc) of
    true ->
        P = fun (Tuple) -> element(1, Tuple) =/= <<"ddoc_revision">> end,
        [{<<"ddoc_revision">>, couch_index_merger:ddoc_rev_str(DDoc)} |
            lists:filter(P, EJson)];
    false ->
        EJson
    end,

    Body = {EJson2},
    % remember the source URL in the process dictionary; it is read back
    % via get(from_url) when queueing remote errors
    put(from_url, Url),
    {MergeUrl, post, Headers, ?JSON_ENCODE(Body), Options};

% Second clause: a plain remote index; a GET against its _spatial URL.
http_index_folder_req_details(#simple_index_spec{} = Spec, MergeParams, _DDoc) ->
    #simple_index_spec{
        database = DbUrl,
        ddoc_id = DDocId,
        index_name = SpatialName
    } = Spec,
    #index_merge{
        conn_timeout = Timeout,
        http_params = SpatialArgs
    } = MergeParams,
    {ok, #httpdb{url = Url, lhttpc_options = Options}} =
        couch_index_merger:open_db(DbUrl, nil, Timeout),
    SpatialUrl = Url ++ ?b2l(DDocId) ++ "/_spatial/" ++ ?b2l(SpatialName) ++
        spatial_qs(SpatialArgs),
    put(from_url, DbUrl),
    {SpatialUrl, get, [], [], Options}.
% Serialize one merged row into its JSON wire representation (a binary).
% First clause: an error row; second clause: a regular row, tagged with
% "node":"local" to mark this node as its origin.
spatial_row_obj({{Key, error}, Reason}) ->
    <<"{\"key\":", (?JSON_ENCODE(Key))/binary,
        ",\"error\":",
        (?JSON_ENCODE(couch_util:to_binary(Reason)))/binary, "}">>;
spatial_row_obj({{Bbox, DocId}, {{Type, Coords}, Value}}) ->
    <<"{\"id\":", (?JSON_ENCODE(DocId))/binary,
        ",\"bbox\":", (?JSON_ENCODE(tuple_to_list(Bbox)))/binary,
        ",\"geometry\":",
        (?JSON_ENCODE({[{type, Type}, {coordinates, Coords}]}))/binary,
        ",\"node\":\"", (?LOCAL)/binary, "\"",
        ",\"value\":", (?JSON_ENCODE(Value))/binary, "}">>.

% Rows are merged in plain Erlang term order of their {Bbox, DocId} keys.
spatial_less_fun(A, B) ->
    A < B.

% Counterpart to map_view_folder/6 in couch_view_merger
% Fold a local spatial index into the merge queue, unless the design
% document revision no longer matches (then queue revision_mismatch).
spatial_folder(Db, SpatialSpec, MergeParams, _UserCtx, DDoc, Queue) ->
    #simple_index_spec{
        ddoc_database = DDocDbName, ddoc_id = DDocId, index_name = SpatialName
    } = SpatialSpec,
    #spatial_query_args{
        bbox = Bbox,
        bounds = Bounds,
        stale = Stale
    } = MergeParams#index_merge.http_params,
    FoldlFun = make_spatial_fold_fun(Queue),
    {DDocDb, Index} = get_spatial_index(Db, DDocDbName, DDocId,
        SpatialName, Stale),

    case not(couch_index_merger:should_check_rev(MergeParams, DDoc)) orelse
        couch_index_merger:ddoc_unchanged(DDocDb, DDoc) of
    true ->
        % The spatial index doesn't output a total_rows property, hence
        % we don't need a proper row_count (but we need it in the queue to
        % make the index merging work correctly)
        ok = couch_view_merger_queue:queue(Queue, {row_count, 0}),
        couch_spatial:fold(Index, FoldlFun, nil, Bbox, Bounds);
    false ->
        ok = couch_view_merger_queue:queue(Queue, revision_mismatch)
    end,
    % DDocDb may be nil for a local ddoc; the catch swallows that case
    catch couch_db:close(DDocDb).
% Counterpart to get_map_view/5 in couch_view_merger
% Open the spatial index of a (possibly separate ddoc-db) design
% document. Returns {DDocDb | nil, Index}; the caller closes DDocDb.
get_spatial_index(Db, DDocDbName, DDocId, SpatialName, Stale) ->
    GroupId = couch_index_merger:get_group_id(DDocDbName, DDocId),
    {ok, Index, _Group} = couch_spatial:get_spatial_index(Db, GroupId,
        SpatialName, Stale),
    case GroupId of
    {DDocDb, DDocId} -> {DDocDb, Index};
    DDocId -> {nil, Index}
    end.

% Counterpart to http_view_fold/3 in couch_view_merger
% Continuation-passing JSON stream parser for a remote node's response:
% each function returns the handler for the next parse event.
http_spatial_fold(object_start, Queue) ->
    % spatial responses carry no total_rows, so queue a dummy row count
    ok = couch_view_merger_queue:queue(Queue, {row_count, 0}),
    fun(Ev) -> http_spatial_fold_rows_1(Ev, Queue) end.

% Counterpart to http_view_fold_rows_1/2 in couch_view_merger
% Skip events until the "rows" array begins.
http_spatial_fold_rows_1({key, <<"rows">>}, Queue) ->
    fun(array_start) -> fun(Ev) -> http_spatial_fold_rows_2(Ev, Queue) end end;
http_spatial_fold_rows_1(_Ev, Queue) ->
    fun(Ev) -> http_spatial_fold_rows_1(Ev, Queue) end.

% Counterpart to http_view_fold_fold_rows_2/2 in couch_view_merger
% Collect each row object and queue it; after the array ends, start
% looking for an optional "errors" array.
http_spatial_fold_rows_2(array_end, Queue) ->
    fun(Ev) -> http_spatial_fold_errors_1(Ev, Queue) end;
http_spatial_fold_rows_2(object_start, Queue) ->
    fun(Ev) ->
        json_stream_parse:collect_object(
            Ev,
            fun(Row) ->
                http_spatial_fold_queue_row(Row, Queue),
                fun(Ev2) -> http_spatial_fold_rows_2(Ev2, Queue) end
            end)
    end.

% Counterpart to http_view_fold_errors_1/2 in couch_view_merger
% Advance to the "errors" array if present; otherwise ignore the rest.
http_spatial_fold_errors_1({key, <<"errors">>}, Queue) ->
    fun(array_start) -> fun(Ev) -> http_spatial_fold_errors_2(Ev, Queue) end end;
http_spatial_fold_errors_1(_Ev, _Queue) ->
    fun couch_index_merger:void_event/1.
% Counterpart to http_view_fold_errors_2/2 in couch_view_merger
% Queue every error object found in the "errors" array.
http_spatial_fold_errors_2(array_end, _Queue) ->
    fun couch_index_merger:void_event/1;
http_spatial_fold_errors_2(object_start, Queue) ->
    fun(Ev) ->
        json_stream_parse:collect_object(
            Ev,
            fun(Error) ->
                http_view_fold_queue_error(Error, Queue),
                fun(Ev2) -> http_spatial_fold_errors_2(Ev2, Queue) end
            end)
    end.

% Carbon copy of http_view_fold_queue_error/2 in couch_view_merger
% Queue a remote error. If the remote did not name its origin (i.e. it
% says "local"), substitute the request URL that was stored in the
% process dictionary under from_url.
http_view_fold_queue_error({Props}, Queue) ->
    From0 = couch_util:get_value(<<"from">>, Props, ?LOCAL),
    From = case From0 of
    ?LOCAL ->
        get(from_url);
    _ ->
        From0
    end,
    Reason = couch_util:get_value(<<"reason">>, Props, null),
    ok = couch_view_merger_queue:queue(Queue, {error, From, Reason}).

% Counterpart to http_view_fold_queue_row/2 in couch_view_merger
% Used for merges of remote DBs
% Convert a decoded remote row back into the internal row format
% ({{Bbox, DocId}, {{Type, Coords}, Value}}) and queue it for merging.
http_spatial_fold_queue_row({Props}, Queue) ->
    Id = couch_util:get_value(<<"id">>, Props, nil),
    Bbox = couch_util:get_value(<<"bbox">>, Props, null),
    {Geom} = couch_util:get_value(<<"geometry">>, Props, null),
    Val = couch_util:get_value(<<"value">>, Props),
    Row = case couch_util:get_value(<<"error">>, Props, nil) of
    nil ->
        GeomType = couch_util:get_value(<<"type">>, Geom),
        Coords = couch_util:get_value(<<"coordinates">>, Geom),
        case couch_util:get_value(<<"doc">>, Props, nil) of
        nil ->
            {{list_to_tuple(Bbox), Id}, {{GeomType,Coords}, Val}};
        % NOTE vmx 20110818: GeoCouch doesn't support include_docs atm,
        % but I'll just leave the code here
        Doc ->
            {{list_to_tuple(Bbox), Id}, {{GeomType,Coords}, Val}, {doc, Doc}}
        end;
    Error ->
        % error in a map row
        {{list_to_tuple(Bbox), error}, Error}
    end,
    ok = couch_view_merger_queue:queue(Queue, Row).
% Counterpart to make_map_fold_fun/4 in couch_view_merger.
% Used for merges of local DBs: forwards every folded row to the merge queue.
make_spatial_fold_fun(Queue) ->
    fun({{_Bbox, _DocId}, {_Geom, _Value}} = Row, Acc) ->
        ok = couch_view_merger_queue:queue(Queue, Row),
        {ok, Acc}
    end.

% Counterpart to merge_map_views/6 in couch_view_merger.
% Drives the merge loop until the row limit is exhausted.
merge_spatial(#merge_params{limit = 0} = Params) ->
    couch_index_merger:merge_indexes_no_limit(Params);
merge_spatial(#merge_params{row_acc = []} = Params) ->
    MinRowFun = fun merge_spatial_min_row/2,
    case couch_index_merger:merge_indexes_no_acc(Params, MinRowFun) of
    {params, NextParams} ->
        merge_spatial(NextParams);
    Done ->
        Done
    end;
% ??? vmx 20110805: Does this case ever happen in the spatial index?
merge_spatial(Params) ->
    merge_spatial(couch_index_merger:handle_skip(Params)).

% Counterpart to merge_map_min_row/2 in couch_view_merger.
% Flushes the queue and records the minimum row as the current accumulator.
merge_spatial_min_row(Params, MinRow) ->
    ok = couch_view_merger_queue:flush(Params#merge_params.queue),
    couch_index_merger:handle_skip(Params#merge_params{row_acc = [MinRow]}).
% Counterpart to view_qs/1 in couch_view_merger.
% Renders a #spatial_query_args{} record back into an HTTP query string,
% emitting only the parameters that differ from the record defaults.
spatial_qs(SpatialArgs) ->
    Defaults = #spatial_query_args{},
    #spatial_query_args{
        bbox = Bbox,
        stale = Stale,
        count = Count,
        bounds = Bounds
    } = SpatialArgs,
    % NOTE(review): the nth(2, hd(io_lib:format(...))) trick relies on the
    % internal list structure io_lib produces for a tuple (apparently to drop
    % the surrounding braces) -- confirm before touching it.
    BboxParam = case Bbox =:= Defaults#spatial_query_args.bbox of
    true ->
        [];
    false ->
        ["bbox=" ++ ?b2l(iolist_to_binary(
            lists:nth(2, hd(io_lib:format("~p", [Bbox])))))]
    end,
    StaleParam = case Stale =:= Defaults#spatial_query_args.stale of
    true ->
        [];
    false ->
        ["stale=" ++ atom_to_list(Stale)]
    end,
    % NOTE(review): count appears to be a boolean flag (an atom), hence
    % atom_to_list -- confirm against couch_spatial.hrl.
    CountParam = case Count =:= Defaults#spatial_query_args.count of
    true ->
        [];
    false ->
        ["count=" ++ atom_to_list(Count)]
    end,
    BoundsParam = case Bounds =:= Defaults#spatial_query_args.bounds of
    true ->
        [];
    false ->
        ["bounds=" ++ ?b2l(iolist_to_binary(
            lists:nth(2, hd(io_lib:format("~p", [Bounds])))))]
    end,
    case BboxParam ++ StaleParam ++ CountParam ++ BoundsParam of
    [] ->
        [];
    Params ->
        "?" ++ string:join(Params, "&")
    end.
--------------------------------------------------------------------------------
/test/100-updater.t:
--------------------------------------------------------------------------------
#!/usr/bin/env escript
%% -*- erlang -*-

% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-define(MOD, couch_spatial_updater).
-define(JSON_ENCODE(V), ejson:encode(V)).

% etap entry point: put the script dir and the project ebin dirs on the code
% path, declare the plan (28 assertions) and bail out on any crash.
main(_) ->
    code:add_pathz(filename:dirname(escript:script_name())),
    gc_test_util:init_code_path(),
    etap:plan(28),
    case (catch test()) of
    ok ->
        etap:end_tests();
    Other ->
        etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
        etap:bail(Other)
    end,
    ok.

% Runs every test group in order; together they emit the 28 planned asserts.
test() ->
    test_bbox(),
    test_bbox_initbbox(),
    test_extract_bbox(),
    test_process_result_geometrycollection(),
    test_process_result_point(),
    test_process_result_point_bbox(),
    test_process_result_linestring(),
    test_process_result_linestring_toosmallbbox(),
    test_geojsongeom_to_geocouch_point(),
    test_geojsongeom_to_geocouch_linestring(),
    test_geojsongeom_to_geocouch_geometrycollection(),
    test_geojsongeom_to_geocouch_nested_geometrycollection(),
    test_geocouch_to_geojsongeom_point(),
    test_geocouch_to_geojsongeom_linestring(),
    test_geocouch_to_geojsongeom_geometrycollection(),
    test_geocouch_to_geojsongeom_nested_geometrycollection(),
    ok.

% The tests are based on the examples of the GeoJSON format specification.
% bbox/2 returns [MinDim1, ..., MinDimN, MaxDim1, ..., MaxDimN].
test_bbox() ->
    etap:is(?MOD:bbox([[100.0, 0.0], [101.0, 1.0]], nil),
        [100.0, 0.0, 101.0, 1.0],
        "Bounding box of LineString with 2 points (initial bbox==nil)"),
    etap:is(?MOD:bbox([[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0],
            [100.0, 0.0]], nil),
        [100.0, 0.0, 101.0, 1.0],
        "Bounding box of LineString with 4 points (initial bbox==nil)"),
    etap:is(?MOD:bbox([[-10.0, 0.0, 50.4, 58.69], [101.0, -1.0, -72.8, 9.5]],
            nil),
        [-10.0, -1.0, -72.8, 9.5, 101.0, 0.0, 50.4, 58.69],
        "Bounding box of LineString with 4 dimensional coordinates "
        "(initial bbox==nil)").
% bbox/2 with a non-nil initial bounding box: the result is the union of the
% coordinates' extent and the initial box. The second case shows the initial
% box may also be given as a {Mins, Maxs} tuple.
test_bbox_initbbox() ->
    etap:is(?MOD:bbox([[100.0, 0.0], [110.0, 1.0]],
            [105.4, 20.3, 200.36, 0.378]),
        [100.0, 0.0, 200.36, 1.0],
        "Bounding box with initial bounding box (a)"),
    etap:is(?MOD:bbox([[100.0, 0.0], [110.0, 1.0]],
            {[105.4, 20.3], [200.36, 0.378]}),
        [100.0, 0.0, 200.36, 1.0],
        "Bounding box with initial bounding box (b)").

% extract_bbox/2 dispatches on the GeoJSON geometry type to find the right
% nesting depth of the coordinates before computing the bounding box.
test_extract_bbox() ->
    etap:is(?MOD:extract_bbox('Point', [100.0, 0.0]),
        [100.0, 0.0, 100.0, 0.0],
        "Extract bounding box of a Point"),
    etap:is(?MOD:extract_bbox('LineString', [[100.0, 0.0], [101.0, 1.0]]),
        [100.0, 0.0, 101.0, 1.0],
        "Extract bounding box of a LineString"),
    etap:is(?MOD:extract_bbox('Polygon', [
        [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
        [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
        ]),
        [100.0, 0.0, 101.0, 1.0],
        "Extract bounding box of a Polygon"),
    etap:is(?MOD:extract_bbox('MultiLineString', [
        [[100.0, 0.0], [101.0, 1.0]],
        [[102.0, 2.0], [103.0, 3.0]]
        ]),
        [100.0, 0.0, 103.0, 3.0],
        % Fixed copy-paste error: this description previously said "Polygon".
        "Extract bounding box of a MultiLineString"),
    etap:is(?MOD:extract_bbox('MultiPolygon', [
        [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0],
            [102.0, 2.0]]],
        [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0],
            [100.0, 0.0]],
        [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8],
            [100.2, 0.2]]]
        ]),
        [100.0, 0.0, 103.0, 3.0],
        "Extract bounding box of a MultiPolygon").
% process_result/1 takes {GeometryJson, ValueJson} and returns
% {BboxTuple, {GeoCouchGeom, Value}}.
test_process_result_geometrycollection() ->
    Geojson = {[{<<"type">>,<<"GeometryCollection">>},
        {<<"geometries">>,
            [{[{<<"type">>,<<"Point">>},
                {<<"coordinates">>,[100.0,0.0]}]},
             {[{<<"type">>,<<"LineString">>},
                {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]},
    {Bbox, {Geom, <<"somedoc">>}} = ?MOD:process_result(
        {?JSON_ENCODE(Geojson), ?JSON_ENCODE(<<"somedoc">>)}),
    etap:is(Geom,
        {'GeometryCollection', [
            {'Point', [100.0,0.0]},
            {'LineString', [[101.0,0.0],[102.0,1.0]]}]},
        "GeometryCollection was processed correctly"),
    etap:is(Bbox,
        {100.0, 0.0, 102.0, 1.0},
        "Bounding box of GeometryCollection is correct").

% XXX vmx (2011-02-16) Nested GeometryCollections are currently not supported
%process_result_nested_geometrycollection() ->
%    Geojson = {[{<<"type">>,<<"GeometryCollection">>},
%        {<<"geometries">>,
%            [{[{<<"type">>,<<"GeometryCollection">>},
%                {<<"geometries">>,
%                    [{[{<<"type">>,<<"Point">>},
%                        {<<"coordinates">>,[100.0,0.0]}]},
%                     {[{<<"type">>,<<"LineString">>},
%                        {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]}
%            ]}]},
%    {Bbox, {Geom, <<"somedoc">>}} = process_result([Geojson, <<"somedoc">>]),
%    ?assertEqual({'GeometryCollection', [{'GeometryCollection', [
%        {'Point', [100.0,0.0]},
%        {'LineString', [[101.0,0.0],[102.0,1.0]]}]}]},
%        Geom),
%    ?assertEqual({100.0, 0.0, 102.0, 1.0}, Bbox).

% XXX vmx (2011-03-09) Need to find a way to test failures with etap
%test_process_result_geometrycollection_fail() ->
%    % collection contains geometries with different dimensions
%    Geojson = {[{<<"type">>,<<"GeometryCollection">>},
%        {<<"geometries">>,
%            [{[{<<"type">>,<<"Point">>},
%                {<<"coordinates">>,[100.0,0.0,54.5]}]},
%             {[{<<"type">>,<<"LineString">>},
%                {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]},
%    ?assertError(function_clause, process_result([Geojson, <<"somedoc">>])).

test_process_result_point() ->
    Geojson = {[{<<"type">>,<<"Point">>},
        {<<"coordinates">>,[100.0,0.0]}]},
    {Bbox, {Geom, <<"somedoc">>}} = ?MOD:process_result(
        {?JSON_ENCODE(Geojson), ?JSON_ENCODE(<<"somedoc">>)}),
    etap:is(Geom, {'Point', [100.0,0.0]},
        "Point was processed correctly"),
    etap:is(Bbox, {100.0, 0.0, 100.0, 0.0},
        "Bounding box of Point is correct").

% A pre-set "bbox" member overrides the box computed from the coordinates.
test_process_result_point_bbox() ->
    Geojson = {[{<<"type">>,<<"Point">>},
        {<<"coordinates">>,[100.0,0.0]},
        {<<"bbox">>,[100.0,0.0,105.54,8.614]}]},
    {Bbox, {Geom, <<"somedoc">>}} = ?MOD:process_result(
        {?JSON_ENCODE(Geojson), ?JSON_ENCODE(<<"somedoc">>)}),
    etap:is(Geom, {'Point', [100.0,0.0]},
        "Point was processed correctly (with pre set bounding box)"),
    etap:is(Bbox, {100.0, 0.0, 105.54, 8.614},
        "Bounding box of Point is correct (with pre set bounding box)").

test_process_result_linestring() ->
    Geojson = {[{<<"type">>,<<"LineString">>},
        {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]},
    {Bbox, {Geom, <<"somedoc">>}} = ?MOD:process_result(
        {?JSON_ENCODE(Geojson), ?JSON_ENCODE(<<"somedoc">>)}),
    etap:is(Geom, {'LineString', [[101.0,0.0],[102.0,1.0]]},
        "LineString was processed correctly"),
    etap:is(Bbox, {101.0, 0.0, 102.0, 1.0},
        "Bounding box of LineString is correct").
% A pre-set "bbox" member is taken verbatim, even when it is smaller than
% the extent of the coordinates.
test_process_result_linestring_toosmallbbox() ->
    Geojson = {[{<<"type">>,<<"LineString">>},
        {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]},
        {<<"bbox">>,[101.0,0.0,101.54,0.614]}]},
    {Bbox, {Geom, <<"somedoc">>}} = ?MOD:process_result(
        {?JSON_ENCODE(Geojson), ?JSON_ENCODE(<<"somedoc">>)}),
    etap:is(Geom, {'LineString', [[101.0,0.0],[102.0,1.0]]},
        "LineString was processed correctly (with too small bounding box)"),
    etap:is(Bbox, {101.0, 0.0, 101.54, 0.614},
        "Bounding box of LineString is correct (with too small bounding box)").

% geojsongeom_to_geocouch/1: GeoJSON proplist -> {TypeAtom, Coordinates}.
test_geojsongeom_to_geocouch_point() ->
    Geojson = [{<<"type">>,<<"Point">>},
        {<<"coordinates">>,[100.0,0.0]}],
    etap:is(?MOD:geojsongeom_to_geocouch(Geojson),
        {'Point', [100.0,0.0]},
        "Transform Point from GeoJSON to a GeoCouch geometry").

test_geojsongeom_to_geocouch_linestring() ->
    Geojson = [{<<"type">>,<<"LineString">>},
        {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}],
    etap:is(?MOD:geojsongeom_to_geocouch(Geojson),
        {'LineString', [[101.0,0.0],[102.0,1.0]]},
        "Transform LineString from GeoJSON to a GeoCouch geometry").

test_geojsongeom_to_geocouch_geometrycollection() ->
    Geojson = [{<<"type">>,<<"GeometryCollection">>},
        {<<"geometries">>,
            [{[{<<"type">>,<<"Point">>},
                {<<"coordinates">>,[100.0,0.0]}]},
             {[{<<"type">>,<<"LineString">>},
                {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}],
    etap:is(?MOD:geojsongeom_to_geocouch(Geojson),
        {'GeometryCollection', [
            {'Point', [100.0,0.0]},
            {'LineString', [[101.0,0.0],[102.0,1.0]]}]},
        "Transform GeometryCollection from GeoJSON to a GeoCouch geometry").

% Unlike process_result/1, the plain geometry transform does support nesting.
test_geojsongeom_to_geocouch_nested_geometrycollection() ->
    Geojson = [{<<"type">>,<<"GeometryCollection">>},
        {<<"geometries">>,
            [{[{<<"type">>,<<"GeometryCollection">>},
                {<<"geometries">>,
                    [{[{<<"type">>,<<"Point">>},
                        {<<"coordinates">>,[100.0,0.0]}]},
                     {[{<<"type">>,<<"LineString">>},
                        {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]}
            ]}],
    etap:is(?MOD:geojsongeom_to_geocouch(Geojson),
        {'GeometryCollection', [{'GeometryCollection', [
            {'Point', [100.0,0.0]},
            {'LineString', [[101.0,0.0],[102.0,1.0]]}]}]},
        "Transform nested GeometryCollection from GeoJSON to a "
        "GeoCouch geometry").

% geocouch_to_geojsongeom/1 is the inverse transform. Note the expected
% output uses an atom for "type" and a plain string key "geometries" -- this
% mirrors the implementation's output shape.
test_geocouch_to_geojsongeom_point() ->
    Geom = {'Point', [100.0,0.0]},
    etap:is(?MOD:geocouch_to_geojsongeom(Geom),
        {[{<<"type">>,'Point'}, {<<"coordinates">>,[100.0,0.0]}]},
        "Transform Point from GeoCouch geometry to GeoJSON").

test_geocouch_to_geojsongeom_linestring() ->
    Geom = {'LineString', [[101.0,0.0],[102.0,1.0]]},
    etap:is(?MOD:geocouch_to_geojsongeom(Geom),
        {[{<<"type">>,'LineString'},
            {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]},
        "Transform LineString from GeoCouch geometry to GeoJSON").

test_geocouch_to_geojsongeom_geometrycollection() ->
    Geom = {'GeometryCollection', [
        {'Point', [100.0,0.0]},
        {'LineString', [[101.0,0.0],[102.0,1.0]]}]},
    etap:is(?MOD:geocouch_to_geojsongeom(Geom),
        {[{<<"type">>,'GeometryCollection'},
            {"geometries",
                [{[{<<"type">>,'Point'},
                    {<<"coordinates">>,[100.0,0.0]}]},
                 {[{<<"type">>,'LineString'},
                    {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]},
        "Transform GeometryCollection from GeoCouch geometry to GeoJSON").
test_geocouch_to_geojsongeom_nested_geometrycollection() ->
    Geom = {'GeometryCollection', [{'GeometryCollection', [
        {'Point', [100.0,0.0]},
        {'LineString', [[101.0,0.0],[102.0,1.0]]}]}]},
    % Nesting round-trips: each inner collection keeps the same shape, with
    % an atom "type" value and a string key "geometries".
    etap:is(?MOD:geocouch_to_geojsongeom(Geom),
        {[{<<"type">>,'GeometryCollection'},
            {"geometries",
                [{[{<<"type">>,'GeometryCollection'},
                    {"geometries",
                        [{[{<<"type">>,'Point'},
                            {<<"coordinates">>,[100.0,0.0]}]},
                         {[{<<"type">>,'LineString'},
                            {<<"coordinates">>,[[101.0,0.0],[102.0,1.0]]}]}]}]}
                ]}]},
        "Transform nested GeometryCollection from GeoCouch geometry "
        "to GeoJSON").
--------------------------------------------------------------------------------
/share/www/script/test/list_spatial.js:
--------------------------------------------------------------------------------
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
12 | 13 | couchTests.list_spatial = function(debug) { 14 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"}); 15 | db.deleteDb(); 16 | db.createDb(); 17 | if (debug) debugger; 18 | 19 | var designDoc = { 20 | _id:"_design/lists", 21 | language: "javascript", 22 | spatial : { 23 | basicIndex : stringFun(function(doc) { 24 | emit({ 25 | type: "Point", 26 | coordinates: [doc.loc[0], doc.loc[1]] 27 | }, doc.string); 28 | }) 29 | }, 30 | lists: { 31 | basicBasic : stringFun(function(head, req) { 32 | send("head"); 33 | var row; 34 | while(row = getRow()) { 35 | log("row: "+toJSON(row)); 36 | send(row.id); 37 | //send("row"); 38 | }; 39 | return "tail"; 40 | }), 41 | basicJSON : stringFun(function(head, req) { 42 | start({"headers":{"Content-Type" : "application/json"}}); 43 | send('{"head":'+toJSON(head)+', '); 44 | send('"req":'+toJSON(req)+', '); 45 | send('"rows":['); 46 | var row, sep = ''; 47 | while (row = getRow()) { 48 | send(sep + toJSON(row)); 49 | sep = ', '; 50 | } 51 | return "]}"; 52 | }), 53 | simpleForm: stringFun(function(head, req) { 54 | log("simpleForm"); 55 | send('
    '); 56 | var row, row_number = 0, prevBbox, firstBbox = null; 57 | while (row = getRow()) { 58 | row_number += 1; 59 | if (!firstBbox) firstBbox = row.bbox; 60 | prevBbox = row.bbox; 61 | send('\n
  • Bbox: '+row.bbox 62 | +' Value: '+row.value 63 | +' LineNo: '+row_number+'
  • '); 64 | } 65 | return '

FirstBbox: '+ firstBbox + 66 | ' LastBbox: '+ prevBbox+'

'; 67 | }), 68 | acceptSwitch: stringFun(function(head, req) { 69 | // respondWith takes care of setting the proper headers 70 | provides("html", function() { 71 | send("HTML
    "); 72 | 73 | var row, num = 0; 74 | while (row = getRow()) { 75 | num ++; 76 | send('\n
  • Bbox: ' 77 | +row.bbox+' Value: '+row.value 78 | +' LineNo: '+num+'
  • '); 79 | } 80 | 81 | // tail 82 | return '
'; 83 | }); 84 | 85 | provides("xml", function() { 86 | send('' 87 | +'Test XML Feed'); 88 | 89 | while (row = getRow()) { 90 | var entry = new XML(''); 91 | entry.id = row.id; 92 | entry.title = row.bbox; 93 | entry.content = row.value; 94 | send(entry); 95 | } 96 | return ""; 97 | }); 98 | }), 99 | qsParams: stringFun(function(head, req) { 100 | return toJSON(req.query) + "\n"; 101 | }), 102 | stopIter: stringFun(function(req) { 103 | send("head"); 104 | var row, row_number = 0; 105 | while(row = getRow()) { 106 | if(row_number > 2) break; 107 | send(" " + row_number); 108 | row_number += 1; 109 | }; 110 | return " tail"; 111 | }), 112 | stopIter2: stringFun(function(head, req) { 113 | provides("html", function() { 114 | send("head"); 115 | var row, row_number = 0; 116 | while(row = getRow()) { 117 | if(row_number > 2) break; 118 | send(" " + row_number); 119 | row_number += 1; 120 | }; 121 | return " tail"; 122 | }); 123 | }), 124 | tooManyGetRows : stringFun(function() { 125 | send("head"); 126 | var row; 127 | while(row = getRow()) { 128 | send(row.id); 129 | }; 130 | getRow(); 131 | getRow(); 132 | getRow(); 133 | row = getRow(); 134 | return "after row: "+toJSON(row); 135 | }), 136 | emptyList: stringFun(function() { 137 | return " "; 138 | }), 139 | rowError : stringFun(function(head, req) { 140 | send("head"); 141 | var row = getRow(); 142 | send(fooBarBam); // intentional error 143 | return "tail"; 144 | }), 145 | listWithCommonJs: stringFun(function() { 146 | var lib = require('somelib'); 147 | return lib.type; 148 | }), 149 | properties: stringFun(function() { 150 | start({"headers":{"Content-Type" : "application/json"}}); 151 | return JSON.stringify(getRow()); 152 | }) 153 | }, 154 | somelib: "exports.type = 'point';" 155 | }; 156 | var indexOnlyDesignDoc = { 157 | _id:"_design/indexes", 158 | language: "javascript", 159 | spatial : { 160 | basicIndex : stringFun(function(doc) { 161 | emit({ 162 | type: "Point", 163 | coordinates: [doc.loc[0], 
doc.loc[1]] 164 | }, doc.string); 165 | }) 166 | } 167 | }; 168 | var erlListDoc = { 169 | _id: "_design/erlang", 170 | language: "erlang", 171 | lists: { 172 | simple: 173 | 'fun(Head, {Req}) -> ' + 174 | ' Send(<<"[">>), ' + 175 | ' Fun = fun({Row}, Sep) -> ' + 176 | ' Val = proplists:get_value(<<"bbox">>, Row, 23), ' + 177 | ' Send(list_to_binary(Sep ++ ' + 178 | ' lists:flatten(io_lib:format("~p", [Val])))), ' + 179 | ' {ok, ","} ' + 180 | ' end, ' + 181 | ' {ok, _} = FoldRows(Fun, ""), ' + 182 | ' Send(<<"]">>) ' + 183 | 'end.' 184 | } 185 | }; 186 | 187 | T(db.save(designDoc).ok); 188 | 189 | function makeSpatialDocs(start, end, templateDoc) { 190 | var docs = makeDocs(start, end, templateDoc); 191 | for (var i=0; i<\/ul>/.test(xhr.responseText)); 257 | 258 | //too many Get Rows 259 | xhr = CouchDB.request("GET", url_pre + "tooManyGetRows/basicIndex" + url_bbox); 260 | T(xhr.status == 200, "tooManyGetRows"); 261 | T(/after row: null/.test(xhr.responseText)); 262 | 263 | 264 | // limit and skip tests 265 | url = url_pre + "basicBasic/basicIndex" + url_bbox; 266 | xhr = CouchDB.request("GET", url + '&skip=3'); 267 | TEquals(15, xhr.responseText.length, "skip 3"); 268 | 269 | xhr = CouchDB.request("GET", url + '&limit=5'); 270 | TEquals(13, xhr.responseText.length, "limit 5"); 271 | 272 | xhr = CouchDB.request("GET", url + '&skip=4&limit=3'); 273 | TEquals(11, xhr.responseText.length, "skip 4, limit 3"); 274 | 275 | xhr = CouchDB.request("GET", url + '&skip=4&limit=31'); 276 | TEquals(14, xhr.responseText.length, "skip 4, limit > total"); 277 | 278 | xhr = CouchDB.request("GET", url + "&skip=1&limit=4"); 279 | // remove "tail" 280 | resp = xhr.responseText.substring(0,xhr.responseText.length-4); 281 | TEquals(8, resp.length, "skip 1, limit is 4"); 282 | xhr = CouchDB.request("GET", url + "&skip=5&limit=3"); 283 | var oldResp = resp; 284 | // remove "head" 285 | resp = xhr.responseText.substring(4); 286 | TEquals(7, resp.length, "skip 5, limit is 3"); 287 | var 
concatenated = oldResp + resp; 288 | xhr = CouchDB.request("GET", url + "&skip=1&limit=7"); 289 | TEquals(15, xhr.responseText.length, "skip 1, limit is 7"); 290 | TEquals(true, xhr.responseText===concatenated, 291 | "two concatenated requests are the same as a single one"); 292 | 293 | 294 | // test that etags are available 295 | xhr = CouchDB.request("GET", url_pre + "basicBasic/basicIndex" + url_bbox); 296 | etag = xhr.getResponseHeader("etag"); 297 | xhr = CouchDB.request("GET", url_pre + "basicBasic/basicIndex" + url_bbox, { 298 | headers: {"if-none-match": etag} 299 | }); 300 | T(xhr.status == 304); 301 | 302 | // verify the etags expire correctly 303 | docs = makeSpatialDocs(11, 12); 304 | db.bulkSave(docs); 305 | 306 | xhr = CouchDB.request("GET", url_pre + "simpleForm/basicIndex" + url_bbox, { 307 | headers: {"if-none-match": etag} 308 | }); 309 | T(xhr.status == 200, "etag expire"); 310 | 311 | // empty list 312 | xhr = CouchDB.request("GET", url_pre + "emptyList/basicIndex" + url_bbox); 313 | T(xhr.responseText.match(/^ $/)); 314 | 315 | xhr = CouchDB.request("GET", url_pre + "rowError/basicIndex" + url_bbox); 316 | T(/ReferenceError/.test(xhr.responseText)); 317 | 318 | // now with extra qs params 319 | xhr = CouchDB.request("GET", url_pre + "qsParams/basicIndex" + url_bbox + 320 | "&foo=blam"); 321 | T(xhr.responseText.match(/blam/)); 322 | 323 | xhr = CouchDB.request("GET", url_pre + "stopIter/basicIndex" + url_bbox); 324 | // T(xhr.getResponseHeader("Content-Type") == "text/plain"); 325 | T(xhr.responseText.match(/^head \d \d \d tail$/) && "basic stop"); 326 | 327 | xhr = CouchDB.request("GET", url_pre + "stopIter2/basicIndex" + url_bbox, { 328 | headers : { 329 | "Accept" : "text/html" 330 | } 331 | }); 332 | T(xhr.responseText.match(/^head \d \d \d tail$/) && "stop 2"); 333 | 334 | // with accept headers for HTML 335 | xhr = CouchDB.request("GET", url_pre + "acceptSwitch/basicIndex" + url_bbox, { 336 | headers: { 337 | "Accept": 'text/html' 338 | } 
339 | }); 340 | T(xhr.getResponseHeader("Content-Type") == "text/html; charset=utf-8"); 341 | T(xhr.responseText.match(/HTML/)); 342 | T(xhr.responseText.match(/Value/)); 343 | 344 | // now with xml 345 | xhr = CouchDB.request("GET", url_pre + "/acceptSwitch/basicIndex" + url_bbox, { 346 | headers: { 347 | "Accept": 'application/xml' 348 | } 349 | }); 350 | T(xhr.getResponseHeader("Content-Type") == "application/xml"); 351 | T(xhr.responseText.match(/XML/)); 352 | T(xhr.responseText.match(/entry/)); 353 | 354 | // test with CommonJS module 355 | xhr = CouchDB.request("GET", url_pre + "listWithCommonJs/basicIndex" + url_bbox); 356 | T(xhr.status == 200, "standard get should be 200"); 357 | T(/point/.test(xhr.responseText)); 358 | 359 | // Test we can run lists and views from separate docs. 360 | T(db.save(indexOnlyDesignDoc).ok); 361 | var url = url_pre + "simpleForm/indexes/basicIndex" + url_bbox; 362 | xhr = CouchDB.request("GET", url); 363 | T(xhr.status == 200, "multiple design docs."); 364 | T(/-10,29,-10,29/.test(xhr.responseText)); 365 | T(/-10,33,-10,33/.test(xhr.responseText)); 366 | T(/-21,26,-21,26/.test(xhr.responseText)); 367 | 368 | var erlViewTest = function() { 369 | T(db.save(erlListDoc).ok); 370 | var url = "/test_suite_db/_design/erlang/_spatial/_list/" + 371 | "simple/indexes/basicIndex" + url_bbox; 372 | xhr = CouchDB.request("GET", url); 373 | T(xhr.status == 200, "multiple languages in design docs."); 374 | var list = JSON.parse(xhr.responseText); 375 | T(list.length == 11); 376 | T(/-10,21,-10,21/.test(xhr.responseText)); 377 | T(/-10,31,-10,31/.test(xhr.responseText)); 378 | T(/-21,26,-21,26/.test(xhr.responseText)); 379 | }; 380 | 381 | run_on_modified_server([{ 382 | section: "native_query_servers", 383 | key: "erlang", 384 | value: "{couch_native_process, start_link, []}" 385 | }], erlViewTest); 386 | 387 | 388 | // There was a bug within the code path when a parent node MBR is completely 389 | // within the bbox it is searched for, but 
only if it's more than 1 level 390 | // deep. Therefore we need to insert more than 40 docs as the current max 391 | // limit of a node is 40. 392 | docs = makeSpatialDocs(20, 70); 393 | db.bulkSave(docs); 394 | xhr = CouchDB.request("GET", url_pre + "emptyList/basicIndex" + url_bbox); 395 | T(xhr.responseText.match(/^ $/)); 396 | 397 | // Test if row contains bbox and geometry properties 398 | xhr = CouchDB.request("GET", url_pre + "properties/basicIndex" + url_bbox); 399 | T(xhr.status == 200, "properties"); 400 | resp = JSON.parse(xhr.responseText); 401 | TEquals(4, resp.bbox.length); 402 | TEquals("Point", resp.geometry.type); 403 | TEquals(2, resp.geometry.coordinates.length); 404 | }; 405 | -------------------------------------------------------------------------------- /src/geocouch/couch_spatial_updater.erl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -module(couch_spatial_updater). 14 | 15 | -ifdef(makecheck). 16 | -compile(export_all). 17 | -endif. 18 | 19 | 20 | -export([update/2]). 21 | 22 | % for benchmark script 23 | -export([geojson_get_bbox/1]). 24 | 25 | % for output (couch_http_spatial, couch_http_spatial_list) 26 | -export([geocouch_to_geojsongeom/1]). 27 | 28 | % for polygon search 29 | -export([extract_bbox/2, geojsongeom_to_geocouch/1]). 30 | 31 | -include("couch_db.hrl"). 32 | -include("couch_spatial.hrl"). 
% Main updater entry point: folds every database change since the group's
% current sequence number through the spatial map functions, writes the
% results and exits with the updated group (the caller traps this exit).
update(Owner, Group) ->
    #spatial_group{
        db = #db{name = DbName} = Db,
        name = GroupName,
        current_seq = Seq,
        indexes = Indexes
        %purge_seq = PurgeSeq
    } = Group,
    % XXX vmx: what are purges? when do they happen?
    %DbPurgeSeq = couch_db:get_purge_seq(Db),
    %Group2 =
    %if DbPurgeSeq == PurgeSeq ->
    %    Group;
    %DbPurgeSeq == PurgeSeq + 1 ->
    %    couch_task_status:update(<<"Removing purged entries from view index.">>),
    %    purge_index(Group);
    %true ->
    %    couch_task_status:update(<<"Resetting view index due to lost purge entries.">>),
    %    % NOTE vmx:probably needs handle_info({'EXIT', FromPid, reset}
    %    % in couch_spatial_group.erl
    %    exit(reset)
    %end,

    % List of indexes with their (initially empty) results.
    IndexEmptyKVs = [{Index, []} || Index <- Group#spatial_group.indexes],
    % Compute on all docs modified since we last computed.
    TotalChanges = couch_db:count_changes_since(Db, Seq),
    couch_task_status:add_task([
        {type, indexer},
        {database, DbName},
        {design_document, GroupName},
        {progress, 0},
        {changes_done, 0},
        {total_changes, TotalChanges}
    ]),
    couch_task_status:set_update_frequency(500),
    {ok, MapCtx} = mapreduce:start_map_context(
        [I#spatial.def || I <- Indexes]),
    EmptyResults = [[] || _ <- Indexes],

    EnumFun = fun(DocInfo, _, {ChangesProcessed, Acc}) ->
        % Report progress; the fun is only invoked when TotalChanges > 0,
        % so the division is safe.
        Progress = (ChangesProcessed * 100) div TotalChanges,
        couch_task_status:update([
            {progress, Progress},
            {changes_done, ChangesProcessed}
        ]),
        Acc2 = process_doc(Db, Owner, MapCtx, EmptyResults, DocInfo, Acc),
        {ok, {ChangesProcessed + 1, Acc2}}
    end,
    {ok, _, {_, {UncomputedDocs, Group3, ViewKVsToAdd, DocIdViewIdKeys}}} =
        couch_db:enum_docs_since(Db, Seq, EnumFun,
            {0, {[], Group, IndexEmptyKVs, []}}, []),
    % Map whatever docs process_doc/6 accumulated but never flushed.
    Results = spatial_docs(MapCtx, UncomputedDocs, EmptyResults),
    {ViewKVsToAdd2, DocIdViewIdKeys2} = view_insert_query_results(
        UncomputedDocs, Results, ViewKVsToAdd, DocIdViewIdKeys),
    NewSeq = couch_db:get_update_seq(Db),
    ?LOG_DEBUG("new seq num: ~p", [NewSeq]),
    {ok, Group4} = write_changes(Group3, ViewKVsToAdd2, DocIdViewIdKeys2,
        NewSeq),
    exit({new_group, Group4}).
% Walks docs and their map results in lockstep, accumulating per-index
% key/value pairs and the per-doc list of {IndexId, Key} back references.
view_insert_query_results([], [], ViewKVs, DocIdViewIdKeysAcc) ->
    {ViewKVs, DocIdViewIdKeysAcc};
view_insert_query_results([Doc | RestDocs], [QueryResults | RestResults],
        ViewKVs, DocIdViewIdKeysAcc) ->
    {NewViewKVs, NewViewIdKeys} = view_insert_doc_query_results(
        Doc, QueryResults, ViewKVs, [], []),
    NewDocIdViewIdKeys = [{Doc#doc.id, NewViewIdKeys} | DocIdViewIdKeysAcc],
    view_insert_query_results(RestDocs, RestResults, NewViewKVs,
        NewDocIdViewIdKeys).

% Merges one document's results into every index it emitted into.
view_insert_doc_query_results(_Doc, [], [], ViewKVsAcc, ViewIdKeysAcc) ->
    {lists:reverse(ViewKVsAcc), lists:reverse(ViewIdKeysAcc)};
view_insert_doc_query_results(#doc{id = DocId} = Doc,
        [ResultKVs | RestResults], [{View, KVs} | RestViewKVs],
        ViewKVsAcc, ViewIdKeysAcc) ->
    % Take any identical keys and combine the values.
    % Key is the bounding box of the geometry, Value is a tuple of the
    % geometry and the actual value. After sorting, equal keys are adjacent,
    % so duplicates collapse into a single {dups, [...]} entry.
    CombineFun = fun
        ({Key, Value}, [{PrevKey, PrevVal} | AccRest]) ->
            case Key == PrevKey of
            true ->
                case PrevVal of
                {dups, Dups} ->
                    [{PrevKey, {dups, [Value | Dups]}} | AccRest];
                _ ->
                    [{PrevKey, {dups, [Value, PrevVal]}} | AccRest]
                end;
            false ->
                [{Key, Value}, {PrevKey, PrevVal} | AccRest]
            end;
        (KV, []) ->
            [KV]
    end,
    ResultKVs2 = lists:foldl(CombineFun, [], lists:sort(ResultKVs)),
    NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs2],
    NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
    NewViewIdKeys = [{View#spatial.id_num, Key} || {Key, _Value} <- ResultKVs2],
    NewViewIdKeysAcc = NewViewIdKeys ++ ViewIdKeysAcc,
    view_insert_doc_query_results(Doc, RestResults, RestViewKVs,
        NewViewKVsAcc, NewViewIdKeysAcc).

% Convenience wrapper starting the mapping with an empty accumulator.
spatial_docs(MapCtx, Docs, EmptyResults) ->
    spatial_docs(MapCtx, Docs, EmptyResults, []).
% Maps each document through all spatial functions of the group.
% On a map error the document contributes EmptyResults, i.e. no rows,
% and the error is logged instead of aborting the whole batch.
spatial_docs(_MapCtx, [], _EmptyResults, Acc) ->
    lists:reverse(Acc);
spatial_docs(MapCtx, [Doc | RestDocs], EmptyResults, Acc) ->
    JsonDoc = couch_doc:to_raw_json_binary(Doc),
    % NOTE vmx: perhaps map_doc should be renamed to something more general
    % as it can be used for most indexers.
    case mapreduce:map_doc(MapCtx, JsonDoc) of
    {ok, FunsResults} ->
        % The results are a json array of function map yields like this:
        %   [FunResults1, FunResults2 ...]
        % where FunResults is a json array of key value pairs:
        %   [{Geom1Json, Value1Json}, {Geom2Json, Value2Json}]
        % Convert the key, value pairs to tuples like
        %   [{Bbox1, {Geom1, Value1}}, {Bbox2, {Geom2, Value2}}]
        SpatialResults = lists:map(
            fun([]) ->
                [];
            (FunRs) ->
                % do some post-processing of the result documents
                process_results(FunRs)
            end,
            FunsResults),
        spatial_docs(MapCtx, RestDocs, EmptyResults, [SpatialResults | Acc]);
    {error, Reason} ->
        ?LOG_ERROR("Error computing spatial result for document `~s`: ~p",
            [Doc#doc.id, Reason]),
        spatial_docs(MapCtx, RestDocs, EmptyResults, [EmptyResults | Acc])
    end.
% Fold step run once per changed document (derived from an old revision,
% r796805, of couch_view_updater).  Accumulates documents until
% couch_util:should_flush/0 says memory pressure is high enough, then maps
% the batch and persists it via write_changes/4.  Deleted documents are
% recorded with an empty key list so their old index entries get removed;
% design documents are skipped entirely.
process_doc(Db, Owner, MapCtx, EmptyResults, DocInfo, {Docs, Group, IndexKVs, DocIdIndexIdKeys}) ->
    #spatial_group{design_options = DesignOptions} = Group,
    #doc_info{id = DocId, deleted = Deleted} = DocInfo,
    % Honor the `local_seq` design document option when opening docs.
    LocalSeq = proplists:get_value(<<"local_seq">>, DesignOptions, false),
    DocOpts = case LocalSeq of
        true ->
            [conflicts, deleted_conflicts, local_seq];
        _ ->
            [conflicts, deleted_conflicts]
    end,
    case DocId of
    % BUGFIX: this pattern had been corrupted to `<>` (a syntax error);
    % design documents are identified by their "_design/" id prefix.
    <<"_design/", _/binary>> -> % we skip design docs
        {Docs, Group, IndexKVs, DocIdIndexIdKeys};
    _ ->
        {Docs2, DocIdIndexIdKeys2} =
        if Deleted ->
            % Deleted doc: schedule removal of all of its index entries.
            {Docs, [{DocId, []} | DocIdIndexIdKeys]};
        true ->
            {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
            {[Doc | Docs], DocIdIndexIdKeys}
        end,

        case couch_util:should_flush() of
        true ->
            Results = spatial_docs(MapCtx, Docs2, EmptyResults),
            {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2,
                Results, IndexKVs, DocIdIndexIdKeys2),
            {ok, Group2} = write_changes(Group, ViewKVs3, DocIdViewIdKeys3,
                DocInfo#doc_info.local_seq),
            % Tell the group server about the partial state so waiting
            % clients can already be served.
            if is_pid(Owner) ->
                ok = gen_server:cast(Owner, {partial_update, self(), Group2});
            true -> ok end,
            garbage_collect(),
            IndexEmptyKVs = [{Index, []} || Index <- Group#spatial_group.indexes],
            {[], Group2, IndexEmptyKVs, []};
        false ->
            {Docs2, Group, IndexKVs, DocIdIndexIdKeys2}
        end
    end.
% Persist one batch of accumulated key/values: update the id btree (which
% maps DocId -> [{IndexId, Key}] so old entries can be found on the next
% update) and add/remove entries in every index' vtree.  Returns {ok, Group2}
% with the new tree positions and current_seq set to NewSeq.
write_changes(Group, IndexKeyValuesToAdd, DocIdIndexIdKeys, NewSeq) ->
    #spatial_group{id_btree = IdBtree, fd = Fd} = Group,
    ToAdd = [Entry || {_DocId, IndexIdKeys} = Entry <- DocIdIndexIdKeys,
        IndexIdKeys /= []],
    ToRemove = [DocId || {DocId, IndexIdKeys} <- DocIdIndexIdKeys,
        IndexIdKeys == []],
    LookupDocIds = [DocId || {DocId, _IndexIdKeys} <- DocIdIndexIdKeys],
    {ok, LookupResults, IdBtree2} = couch_btree:query_modify(
        IdBtree, LookupDocIds, ToAdd, ToRemove),
    % For every doc that already had index entries, collect the {Key, DocId}
    % pairs that must be purged from each index before the new ones go in.
    KeysToRemoveByIndex = lists:foldl(
        fun({ok, {DocId, IndexIdKeys}}, RemoveAcc) ->
                lists:foldl(
                    fun({IndexId, Key}, RemoveAcc2) ->
                        dict:append(IndexId, {Key, DocId}, RemoveAcc2)
                    end,
                    RemoveAcc, IndexIdKeys);
           ({not_found, _}, RemoveAcc) ->
                RemoveAcc
        end,
        dict:new(), LookupResults),
    Indexes2 = lists:zipwith(
        fun(Index, {_Index, AddKeyValues}) ->
            KeysToRemove = couch_util:dict_find(
                Index#spatial.id_num, KeysToRemoveByIndex, []),
            %?LOG_DEBUG("storing spatial data: ~n~p~n~p~n~p",
            %    [Index, AddKeyValues, KeysToRemove]),
            {ok, TreePos, TreeHeight} = vtree:add_remove(
                Fd, Index#spatial.treepos, Index#spatial.treeheight,
                AddKeyValues, KeysToRemove),
            % Only bump the index' update_seq when its tree actually changed.
            case TreePos =/= Index#spatial.treepos of
                true ->
                    Index#spatial{treepos = TreePos, treeheight = TreeHeight,
                        update_seq = NewSeq};
                false ->
                    Index#spatial{treepos = TreePos, treeheight = TreeHeight}
            end
        end, Group#spatial_group.indexes, IndexKeyValuesToAdd),
    couch_file:flush(Fd),
    Group2 = Group#spatial_group{indexes = Indexes2, current_seq = NewSeq,
        id_btree = IdBtree2},
    lists:foreach(
        fun(Index) ->
            ?LOG_INFO("Position of the spatial index (~p) root node: ~p",
                [Index#spatial.id_num, Index#spatial.treepos])
        end, Indexes2),
    {ok, Group2}.


% NOTE vmx: This is kind of ugly.  This function is needed for a benchmark
% for the replication filter.
% Return the bounding box of a GeoJSON geometry.  "Geo" is wrapped in
% brackets ({}) as returned from proplists:get_value().
% NOTE(review): process_result/1 only matches a {Key, Value} tuple, but it is
% called here with the list [Geo, nil] — confirm against the benchmark caller
% whether this code path still works.
geojson_get_bbox(Geo) ->
    {Bbox, {_, nil}} = process_result([Geo | [nil]]),
    Bbox.


% Post-process one spatial function's raw output ({GeomJson, ValueJson}
% pairs) into {Bbox, {Geom, Value}} tuples.
% NOTE vmx (2011-02-01): the ordering of the results doesn't matter.
process_results(Results) ->
    [process_result(Result) || Result <- lists:reverse(Results)].


% Turn one raw {GeomJsonBin, ValueJsonBin} mapper emit into
% {BboxTuple, {GeoCouchGeom, Value}}.  An explicit "bbox" member on the
% geometry wins over the computed one.
process_result({K, V}) ->
    {Geo} = ?JSON_DECODE(K),
    Value = ?JSON_DECODE(V),
    % NOTE(review): binary_to_atom/2 on a client-supplied type string can
    % grow the atom table; consider binary_to_existing_atom/2.
    Type = binary_to_atom(proplists:get_value(<<"type">>, Geo), utf8),
    Bbox = case Type of
    'GeometryCollection' ->
        Geometries = proplists:get_value(<<"geometries">>, Geo),
        lists:foldl(
            fun({Geometry}, CurBbox) ->
                Type2 = binary_to_atom(
                    proplists:get_value(<<"type">>, Geometry), utf8),
                Coords = proplists:get_value(<<"coordinates">>, Geometry),
                % NOTE(review): this looks up <<"bbox">> on the outer
                % collection (Geo), not on the member geometry — confirm
                % whether that is intended.
                case proplists:get_value(<<"bbox">>, Geo) of
                    undefined ->
                        extract_bbox(Type2, Coords, CurBbox);
                    Bbox2 ->
                        Bbox2
                end
            end, nil, Geometries);
    _ ->
        Coords = proplists:get_value(<<"coordinates">>, Geo),
        case proplists:get_value(<<"bbox">>, Geo) of
            undefined ->
                extract_bbox(Type, Coords);
            Bbox2 ->
                Bbox2
        end
    end,

    Geom = geojsongeom_to_geocouch(Geo),
    {erlang:list_to_tuple(Bbox), {Geom, Value}}.


% Entry point: compute a bounding box with no initial bbox.
extract_bbox(Type, Coords) ->
    extract_bbox(Type, Coords, nil).
% Compute the bounding box of a geometry's coordinates, folded into an
% existing bbox (InitBbox), which may be `nil` (no box yet) or a flat
% [MinX, MinY, MaxX, MaxY] list from a previous geometry.
% Crashes (case_clause) on unknown geometry types — intentional, let it crash.
extract_bbox(Type, Coords, InitBbox) ->
    case Type of
    'Point' ->
        bbox([Coords], InitBbox);
    'LineString' ->
        bbox(Coords, InitBbox);
    'Polygon' ->
        % holes don't matter for the bounding box
        bbox(hd(Coords), InitBbox);
    'MultiPoint' ->
        bbox(Coords, InitBbox);
    'MultiLineString' ->
        lists:foldl(fun(Linestring, CurBbox) ->
            bbox(Linestring, CurBbox)
        end, InitBbox, Coords);
    'MultiPolygon' ->
        lists:foldl(fun(Polygon, CurBbox) ->
            bbox(hd(Polygon), CurBbox)
        end, InitBbox, Coords)
    end.

% Fold a list of coordinate pairs into a bounding box.
% The accumulator is either `nil` (empty), a {MinCoords, MaxCoords} pair of
% lists, or a flat [Mins ++ Maxs] list (an explicit GeoJSON "bbox" member),
% which gets split back into the pair form.  Returns Mins ++ Maxs.
bbox([], {Min, Max}) ->
    Min ++ Max;
bbox([Coords | Rest], nil) ->
    bbox(Rest, {Coords, Coords});
bbox(Coords, Bbox) when is_list(Bbox) ->
    MinMax = lists:split(length(Bbox) div 2, Bbox),
    bbox(Coords, MinMax);
bbox([Coords | Rest], {Min, Max}) ->
    Min2 = lists:zipwith(fun(X, Y) -> erlang:min(X, Y) end, Coords, Min),
    Max2 = lists:zipwith(fun(X, Y) -> erlang:max(X, Y) end, Coords, Max),
    bbox(Rest, {Min2, Max2}).


% @doc Transforms a GeoJSON geometry (as Erlang terms), to an internal
% structure: {TypeAtom, Coordinates}.
% NOTE(review): binary_to_atom/2 on untrusted geometry types can grow the
% atom table; consider binary_to_existing_atom/2.
geojsongeom_to_geocouch(Geom) ->
    Type = proplists:get_value(<<"type">>, Geom),
    Coords = case Type of
    <<"GeometryCollection">> ->
        Geometries = proplists:get_value(<<"geometries">>, Geom),
        [geojsongeom_to_geocouch(G) || {G} <- Geometries];
    _ ->
        proplists:get_value(<<"coordinates">>, Geom)
    end,
    {binary_to_atom(Type, utf8), Coords}.

% @doc Transforms internal structure to a GeoJSON geometry (as Erlang terms)
geocouch_to_geojsongeom({Type, Coords}) ->
    Coords2 = case Type of
    'GeometryCollection' ->
        Geoms = [geocouch_to_geojsongeom(C) || C <- Coords],
        % BUGFIX: the key was the string "geometries"; EJSON object keys
        % must be binaries (cf. <<"coordinates">> and <<"type">> below),
        % otherwise JSON encoding of collections is broken.
        {<<"geometries">>, Geoms};
    _ ->
        {<<"coordinates">>, Coords}
    end,
    {[{<<"type">>, Type}, Coords2]}.
370 | -------------------------------------------------------------------------------- /share/www/script/test/spatial.js: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 12 | 13 | couchTests.spatial = function(debug) { 14 | var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"}); 15 | db.deleteDb(); 16 | db.createDb(); 17 | 18 | if (debug) debugger; 19 | 20 | 21 | var designDoc = { 22 | _id:"_design/spatial", 23 | language: "javascript", 24 | /* This is a 1.1.x feature, disable for now 25 | views: { 26 | lib: { 27 | geo: "exports.type = 'Point';" 28 | } 29 | }, */ 30 | spatial : { 31 | basicIndex : stringFun(function(doc) { 32 | if (doc.loc) { 33 | emit({ 34 | type: "Point", 35 | coordinates: [doc.loc[0], doc.loc[1]] 36 | }, doc.string); 37 | } 38 | }), 39 | dontEmitAll : stringFun(function(doc) { 40 | if (doc._id > 5 && doc.loc) { 41 | emit({ 42 | type: "Point", 43 | coordinates: [doc.loc[0], doc.loc[1]] 44 | }, doc.string); 45 | } 46 | }), 47 | emitNothing : stringFun(function(doc) {}), 48 | geoJsonGeoms : stringFun(function(doc) { 49 | if (doc._id.substr(0,3)=="geo") { 50 | emit(doc.geom, doc.string); 51 | } 52 | }) 53 | /* This is a 1.1.x feature, disable for now 54 | withCommonJs : stringFun(function(doc) { 55 | var lib = require('views/lib/geo'); 56 | emit({ 57 | type: lib.type, 58 | coordinates: [doc.loc[0], doc.loc[1]] 59 | 
}, doc.string); 60 | })*/ 61 | } 62 | }; 63 | 64 | T(db.save(designDoc).ok); 65 | 66 | 67 | function makeSpatialDocs(start, end, templateDoc) { 68 | var docs = makeDocs(start, end, templateDoc); 69 | for (var i=0; i wait for 3 seconds 112 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 113 | '&stale=ok'); 114 | // wait 3 seconds for the next assertions to pass in very slow machines 115 | wait(3); 116 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 117 | '&stale=ok'); 118 | var resp = JSON.parse(xhr.responseText); 119 | TEquals(0, resp.rows.length, "should return no geometries (empty index)"); 120 | 121 | // update the index 122 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 123 | var lastUpdateSeq = JSON.parse(xhr.responseText).update_seq; 124 | 125 | // stale=ok 126 | db.save({"_id": "stale1", "loc": [50000,60000]}); 127 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 128 | "&stale=ok"); 129 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq); 130 | wait(3); 131 | 132 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 133 | "&stale=ok"); 134 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq); 135 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 136 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq+1); 137 | 138 | // stale=update_after 139 | lastUpdateSeq++; 140 | db.save({"_id": "stale2", "loc": [60000,70000]}); 141 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 142 | "&stale=update_after"); 143 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq); 144 | wait(3); 145 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 146 | "&stale=ok"); 147 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq+1); 148 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 
149 | T(JSON.parse(xhr.responseText).update_seq == lastUpdateSeq+1); 150 | 151 | 152 | // emit tests 153 | 154 | // spatial function that doesn't always emit 155 | bbox = [-180, -90, 180, 90]; 156 | xhr = CouchDB.request("GET", url_pre + "dontEmitAll?bbox=" + bbox.join(",")); 157 | TEquals(['6','7','8','9'], extract_ids(xhr.responseText), 158 | "should return geometries with id>5"); 159 | 160 | xhr = CouchDB.request("GET", url_pre + "emitNothing?bbox=" + bbox.join(",")); 161 | TEquals('{\"rows\":[]}\n', xhr.responseText, "nothing emitted at all"); 162 | 163 | 164 | // bounding box tests 165 | 166 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 167 | TEquals(['0','1','2','3','4','5','6','7','8','9'], 168 | extract_ids(xhr.responseText), 169 | "should return all geometries"); 170 | 171 | bbox = [-20, 0, 0, 20]; 172 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 173 | TEquals(['0','1','2'], extract_ids(xhr.responseText), 174 | "should return a subset of the geometries"); 175 | 176 | bbox = [0, 4, 180, 90]; 177 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 178 | TEquals("{\"rows\":[]}\n", xhr.responseText, 179 | "should return no geometries"); 180 | 181 | bbox = [-18, 17, -14, 21]; 182 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 183 | TEquals(['1','2','3'], extract_ids(xhr.responseText), 184 | "should also return geometry at the bounds of the bbox"); 185 | 186 | bbox = [-16, 19, -16, 19]; 187 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 188 | TEquals(['2'], extract_ids(xhr.responseText), 189 | "bbox collapsed to a point should return the geometries there"); 190 | 191 | xhr = CouchDB.request("GET", url_pre + "basicIndex"); 192 | TEquals(['0','1','2','3','4','5','6','7','8','9', 'stale1', 'stale2'], 193 | extract_ids(xhr.responseText), 194 | "no bounding box given should return all geometries"); 195 | 196 | 
// count parameter tests 197 | 198 | bbox = [-180, -90, 180, 90]; 199 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 200 | "&count=true"); 201 | TEquals('{"count":10}\n', xhr.responseText, 202 | "should return the count of all geometries"); 203 | 204 | 205 | // GeoJSON geometry tests 206 | // NOTE vmx: (for all those tests) Should I test if the returned 207 | // bounding box is correct as well? 208 | 209 | // some geometries are based on the GeoJSON specification 210 | // http://geojson.org/geojson-spec.html (2010-08-17) 211 | var geoJsonDocs = [{"_id": "geoPoint", "geom": { "type": "Point", "coordinates": [100.0, 0.0] }}, 212 | {"_id": "geoLineString", "geom": { "type": "LineString", "coordinates":[ 213 | [100.0, 0.0], [101.0, 1.0] 214 | ]}}, 215 | {"_id": "geoPolygon", "geom": { "type": "Polygon", "coordinates": [ 216 | [ [100.0, 0.0], [101.0, 0.0], [100.0, 1.0], [100.0, 0.0] ] 217 | ]}}, 218 | {"_id": "geoPolygonWithHole", "geom": { "type": "Polygon", "coordinates": [ 219 | [ [100.0, 0.0], [101.0, 0.0], [100.0, 1.0], [100.0, 0.0] ], 220 | [ [100.2, 0.2], [100.6, 0.2], [100.2, 0.6], [100.2, 0.2] ] 221 | ]}}, 222 | {"_id": "geoMultiPoint", "geom": { "type": "MultiPoint", "coordinates": [ 223 | [100.0, 0.0], [101.0, 1.0] 224 | ]}}, 225 | {"_id": "geoMultiLineString", "geom": { "type": "MultiLineString", 226 | "coordinates": [ 227 | [ [100.0, 0.0], [101.0, 1.0] ], 228 | [ [102.0, 2.0], [103.0, 3.0] ] 229 | ] 230 | }}, 231 | {"_id": "geoMultiPolygon", "geom": { "type": "MultiPolygon", 232 | "coordinates": [ 233 | [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], 234 | [ 235 | [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], 236 | [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] 237 | ] 238 | ] 239 | }}, 240 | {"_id": "geoGeometryCollection", "geom": { "type": "GeometryCollection", 241 | "geometries": [ 242 | { "type": "Point", "coordinates": [100.0, 0.0] }, 243 | { 
"type": "LineString", "coordinates": [ [101.0, 0.0], [102.0, 1.0] ]} 244 | ] 245 | }} 246 | ]; 247 | db.bulkSave(geoJsonDocs); 248 | 249 | bbox = [100.0, 0.0, 100.0, 0.0]; 250 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 251 | TEquals(true, /geoPoint/.test(extract_ids(xhr.responseText)), 252 | "if bounding box calculation was correct, it should at least" + 253 | " return the geoPoint"); 254 | 255 | bbox = [100.8, 0.8, 101.0, 1.0], 256 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 257 | TEquals(true, /geoPolygon/.test(extract_ids(xhr.responseText)), 258 | "if bounding box calculation was correct, it should at least" + 259 | " return the geoPolygon"); 260 | 261 | bbox = [100.8, 0.8, 101.0, 1.0], 262 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 263 | TEquals(true, /geoPolygonWithHole/.test(extract_ids(xhr.responseText)), 264 | "if bounding box calculation was correct, it should at least" + 265 | " return the geoPolygonWithHole"); 266 | 267 | bbox = [100.1, 0.8, 100.2, 1.5], 268 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 269 | TEquals(true, /geoMultiPoint/.test(extract_ids(xhr.responseText)), 270 | "if bounding box calculation was correct, it should at least" + 271 | " return the geoMultiPoint"); 272 | 273 | bbox = [101.2, 1.3, 101.6, 1.5]; 274 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 275 | TEquals(true, /geoMultiLineString/.test(extract_ids(xhr.responseText)), 276 | "if bounding box calculation was correct, it should at least" + 277 | " return the geoMultiLineString"); 278 | 279 | bbox = [101.2, 2.3, 101.6, 3.5]; 280 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 281 | TEquals(true, /geoMultiPolygon/.test(extract_ids(xhr.responseText)), 282 | "if bounding box calculation was correct, it should at least" + 283 | " return the geoMultiPolygon"); 284 | 285 
| bbox = [102, 0, 102, 0]; 286 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + bbox.join(",")); 287 | TEquals(true, /geoGeometryCollection/.test(extract_ids(xhr.responseText)), 288 | "if bounding box calculation was correct, it should at least" + 289 | " return the geoGeometryCollection"); 290 | 291 | // Test if all geometries were serialised correctly 292 | bbox = [90, -1, 110, 10]; 293 | xhr = CouchDB.request("GET", url_pre + "geoJsonGeoms?bbox=" + 294 | bbox.join(",")); 295 | TEquals(geoJsonDocs[0].geom, getGeomById(xhr.responseText, 'geoPoint'), 296 | 'Point was serialised correctly'); 297 | TEquals(geoJsonDocs[1].geom, getGeomById(xhr.responseText, 'geoLineString'), 298 | 'LineString was serialised correctly'); 299 | TEquals(geoJsonDocs[2].geom, getGeomById(xhr.responseText, 'geoPolygon'), 300 | 'Polygon was serialised correctly'); 301 | TEquals(geoJsonDocs[3].geom, 302 | getGeomById(xhr.responseText, 'geoPolygonWithHole'), 303 | 'Polygon (with holw) was serialised correctly'); 304 | TEquals(geoJsonDocs[4].geom, getGeomById(xhr.responseText, 'geoMultiPoint'), 305 | 'MultiPoint was serialised correctly'); 306 | TEquals(geoJsonDocs[5].geom, 307 | getGeomById(xhr.responseText, 'geoMultiLineString'), 308 | 'point MultiLineString serialised correctly'); 309 | TEquals(geoJsonDocs[6].geom, 310 | getGeomById(xhr.responseText, 'geoMultiPolygon'), 311 | 'MultiPolygon was serialised correctly'); 312 | TEquals(geoJsonDocs[7].geom, 313 | getGeomById(xhr.responseText, 'geoGeometryCollection'), 314 | 'GeometryCollection was serialised correctly'); 315 | 316 | 317 | // Test plane wrapping 318 | 319 | // Put one doc in every quadrant of the pole/dateline tests 320 | db.bulkSave([ 321 | {_id: 'wrap1', loc: [-30, 50]}, 322 | {_id: 'wrap2', loc: [-15, 50]}, 323 | {_id: 'wrap3', loc: [10, 50]}, 324 | {_id: 'wrap4', loc: [-30, 22]}, 325 | {_id: 'wrap5', loc: [-15, 22]}, 326 | {_id: 'wrap6', loc: [10, 22]}, 327 | {_id: 'wrap7', loc: [-30, 5]}, 328 | {_id: 'wrap8', 
loc: [-15, 5]}, 329 | {_id: 'wrap9', loc: [10, 5]} 330 | ]); 331 | 332 | var planeBounds = [-180, -90, 180, 90]; 333 | bbox = [-180, 28, 180, 17]; 334 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 335 | "&plane_bounds=" + planeBounds.join(",")); 336 | TEquals( 337 | ['0','1','7','8','9','wrap1','wrap2','wrap3','wrap7','wrap8','wrap9'], 338 | extract_ids(xhr.responseText), "bbox that spans the poles"); 339 | 340 | bbox = [-10, -90, -20, 90]; 341 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 342 | "&plane_bounds=" + planeBounds.join(",")); 343 | TEquals( 344 | ['0','5','6','7','8','9','wrap1','wrap3','wrap4','wrap6','wrap7','wrap9'], 345 | extract_ids(xhr.responseText), "bbox that spans the date line"); 346 | 347 | bbox = [-10, 28, -20, 17]; 348 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 349 | "&plane_bounds=" + planeBounds.join(",")); 350 | TEquals(['0','7','8','9','wrap1','wrap3','wrap7','wrap9'], 351 | extract_ids(xhr.responseText), "bbox that spans the date line and poles"); 352 | 353 | // try plane bounds that are smaller than the bounding box 354 | planeBounds = [-20, -20, 20, 20]; 355 | bbox = [-180, 28, 180, -28]; 356 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 357 | "&plane_bounds=" + planeBounds.join(",")); 358 | TEquals( 359 | [], extract_ids(xhr.responseText), 360 | "bbox that would span the poles, but is outside of the plane bounds"); 361 | 362 | bbox = [28, -90, -28, 90]; 363 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 364 | "&plane_bounds=" + planeBounds.join(",")); 365 | TEquals( 366 | [], extract_ids(xhr.responseText), 367 | "bbox that would spans the date line, but is outside of the plane bounds"); 368 | 369 | bbox = [28, 28, -28, -28]; 370 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 371 | "&plane_bounds=" + planeBounds.join(",")); 372 | 
TEquals([], extract_ids(xhr.responseText), 373 | "bbox that would span the date line and poles, but is outside of the" + 374 | "plane bounds"); 375 | 376 | // try other plane bounds (but with bbox which is smaller) 377 | planeBounds = [-25, -25, 25, 25]; 378 | bbox = [-25, 24, 25, 6]; 379 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 380 | "&plane_bounds=" + planeBounds.join(",")); 381 | TEquals( 382 | ['5', 'wrap8','wrap9'], 383 | extract_ids(xhr.responseText), "bbox that spans the poles"); 384 | 385 | bbox = [11, -25, -10, 25]; 386 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 387 | "&plane_bounds=" + planeBounds.join(",")); 388 | TEquals( 389 | ['0','1','2','3','4','5','wrap5','wrap8'], 390 | extract_ids(xhr.responseText), "bbox that spans the date line"); 391 | 392 | bbox = [11, 24, -10, 6]; 393 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",") + 394 | "&plane_bounds=" + planeBounds.join(",")); 395 | TEquals(['5','wrap8'], 396 | extract_ids(xhr.responseText), "bbox that spans the date line and poles"); 397 | 398 | // Try flipped bounding box without plane bounds 399 | bbox = [11, 24, -10, 6]; 400 | xhr = CouchDB.request("GET", url_pre + "basicIndex?bbox=" + bbox.join(",")); 401 | TEquals(400, xhr.status, 402 | "flipped bbox without plane bounds should return an error"); 403 | }; 404 | -------------------------------------------------------------------------------- /share/www/script/test/spatial_merging.js: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | // use this file except in compliance with the License. 
You may obtain a copy of 3 | // the License at 4 | // 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // 7 | // Unless required by applicable law or agreed to in writing, software 8 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | // License for the specific language governing permissions and limitations under 11 | // the License. 12 | 13 | couchTests.spatial_merging = function(debug) { 14 | if (debug) debugger; 15 | 16 | function newDb(name) { 17 | var db = new CouchDB(name, {"X-Couch-Full-Commit": "false"}); 18 | db.deleteDb(); 19 | db.createDb(); 20 | 21 | return db; 22 | } 23 | 24 | function dbUri(db) { 25 | return CouchDB.protocol + CouchDB.host + '/' + db.name; 26 | } 27 | 28 | function populateAlternated(dbs, docs) { 29 | var docIdx = 0; 30 | 31 | while (docIdx < docs.length) { 32 | for (var i = 0; (i < dbs.length) && (docIdx < docs.length); i++) { 33 | var db = dbs[i]; 34 | var doc = docs[docIdx]; 35 | 36 | TEquals(true, db.save(doc).ok); 37 | docIdx += 1; 38 | } 39 | } 40 | } 41 | 42 | function populateSequenced(dbs, listOfDocLists) { 43 | for (var i = 0, j = 0; (i < dbs.length) && (j < listOfDocLists.length); i++, j++) { 44 | var db = dbs[i]; 45 | var docList = listOfDocLists[j]; 46 | 47 | for (var k = 0; k < docList.length; k++) { 48 | var doc = docList[k]; 49 | TEquals(true, db.save(doc).ok); 50 | } 51 | } 52 | } 53 | 54 | function addDoc(dbs, doc) { 55 | for (var i = 0; i < dbs.length; i++) { 56 | TEquals(true, dbs[i].save(doc).ok); 57 | delete doc._rev; 58 | } 59 | } 60 | 61 | function mergedQuery(dbs, spatialName, options, sort) { 62 | var body = { 63 | "spatial": {} 64 | }; 65 | 66 | options = options || {}; 67 | 68 | for (var i = 0; i < dbs.length; i++) { 69 | if (typeof dbs[i] === "string") { 70 | body.spatial[dbs[i]] = spatialName; 71 | } else { 72 | body.spatial[dbs[i].name] = spatialName; 73 | } 74 | } 75 | 76 | var qs = ""; 
77 | 78 | for (var q in options) { 79 | if (q === "connection_timeout") { 80 | body["connection_timeout"] = options[q]; 81 | continue; 82 | } 83 | if (q === "on_error") { 84 | body["on_error"] = options[q]; 85 | continue; 86 | } 87 | if (qs !== "") { 88 | qs = qs + "&"; 89 | } 90 | qs = qs + String(q) + "=" + String(options[q]); 91 | } 92 | 93 | qs = "?bbox=-1800,-900,1800,900&" + qs; 94 | 95 | var xhr = CouchDB.request("POST", "/_spatial_merge" + qs, { 96 | headers: { 97 | "Content-Type": "application/json" 98 | }, 99 | body: JSON.stringify(body) 100 | }); 101 | TEquals(200, xhr.status); 102 | 103 | var resp = JSON.parse(xhr.responseText); 104 | // results from a spatial request are not sorted, sort them client sided 105 | if (sort!==false) { 106 | resp.rows = resp.rows.sort(sortById); 107 | } 108 | return resp; 109 | } 110 | 111 | function sortById(a, b) { 112 | var aId = parseInt(a.id); 113 | var bId = parseInt(b.id); 114 | if (aId < bId) return -1; 115 | else if (aId > bId) return 1; 116 | return 0; 117 | } 118 | 119 | function wait(ms) { 120 | var t0 = new Date(), t1; 121 | do { 122 | CouchDB.request("GET", "/"); 123 | t1 = new Date(); 124 | } while ((t1 - t0) <= ms); 125 | } 126 | 127 | function compareSpatialResults(resultA, resultB) { 128 | TEquals(resultA.rows.length, resultB.rows.length, "same # of rows"); 129 | 130 | for (var i = 0; i < resultA.rows.length; i++) { 131 | var a = resultA.rows[i]; 132 | var b = resultB.rows[i]; 133 | var docA = a.doc || null; 134 | var docB = b.doc || null; 135 | 136 | TEquals(JSON.stringify(a.bbox), JSON.stringify(b.bbox), "keys are equal"); 137 | TEquals(JSON.stringify(a.value), JSON.stringify(b.value), 138 | "values are equal"); 139 | TEquals(JSON.stringify(docA), JSON.stringify(docB), "docs are equal"); 140 | } 141 | } 142 | 143 | function makeSpatialDocs(start, end, templateDoc) { 144 | var docs = makeDocs(start, end, templateDoc); 145 | for (var i=0; i 44 | case gen_server:start_link(couch_spatial_group, 45 | 
{InitArgs, self(), Ref = make_ref()}, []) of 46 | {ok, Pid} -> 47 | {ok, Pid}; 48 | ignore -> 49 | receive 50 | {Ref, Pid, Error} -> 51 | case process_info(self(), trap_exit) of 52 | {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end; 53 | {trap_exit, false} -> ok 54 | end, 55 | Error 56 | end; 57 | Error -> 58 | Error 59 | end. 60 | 61 | % api methods 62 | request_group(Pid, Seq) -> 63 | ?LOG_DEBUG("request_group {Pid, Seq} ~p", [{Pid, Seq}]), 64 | case gen_server:call(Pid, {request_group, Seq}, infinity) of 65 | {ok, Group, RefCounter} -> 66 | couch_ref_counter:add(RefCounter), 67 | {ok, Group}; 68 | Error -> 69 | ?LOG_DEBUG("request_group Error ~p", [Error]), 70 | throw(Error) 71 | end. 72 | 73 | 74 | 75 | init({InitArgs, ReturnPid, Ref}) -> 76 | process_flag(trap_exit, true), 77 | case prepare_group(InitArgs, false) of 78 | {ok, #spatial_group{db=Db, fd=Fd, current_seq=Seq}=Group} -> 79 | case Seq > couch_db:get_update_seq(Db) of 80 | true -> 81 | ReturnPid ! {Ref, self(), {error, invalid_view_seq}}, 82 | ignore; 83 | _ -> 84 | couch_db:monitor(Db), 85 | {ok, RefCounter} = couch_ref_counter:start([Fd]), 86 | {ok, #group_state{ 87 | db_name=couch_db:name(Db), 88 | init_args=InitArgs, 89 | group=Group, 90 | ref_counter=RefCounter}} 91 | end; 92 | Error -> 93 | ReturnPid ! {Ref, self(), Error}, 94 | ignore 95 | end. 
% NOTE vmx: There's a lengthy comment about this call in couch_view_group.erl
%
% The caller asks for a sequence ahead of the group and no updater is
% running: open the db, spawn an updater and park the caller on the waiting
% list until the group has caught up.
handle_call({request_group, RequestSeq}, From,
        #group_state{
            db_name = DbName,
            group = #spatial_group{current_seq = GroupSeq} = Group,
            updater_pid = nil,
            waiting_list = WaitList
        } = State) when RequestSeq > GroupSeq ->
    {ok, Db} = couch_db:open_int(DbName, []),
    Group2 = Group#spatial_group{db = Db},
    Owner = self(),
    UpdaterPid = spawn_link(fun() ->
        couch_spatial_updater:update(Owner, Group2)
    end),

    {noreply, State#group_state{
        updater_pid = UpdaterPid,
        group = Group2,
        waiting_list = [{From, RequestSeq} | WaitList]
    }, infinity};

% The requested sequence is less than or equal to the seq of the known
% group: reply with that group right away.
handle_call({request_group, RequestSeq}, _From,
        #group_state{
            group = #spatial_group{current_seq = GroupSeq} = Group,
            ref_counter = RefCounter
        } = State) when RequestSeq =< GroupSeq ->
    ?LOG_DEBUG("(2) request_group handler: seqs: req: ~p, group: ~p",
        [RequestSeq, GroupSeq]),
    {reply, {ok, Group, RefCounter}, State};

% Otherwise: TargetSeq => RequestSeq > GroupSeq.  An updater was already
% started, so just hold the reply until the group reaches RequestSeq.
handle_call({request_group, RequestSeq}, From,
        #group_state{waiting_list = WaitList} = State) ->
    ?LOG_DEBUG("(3) request_group handler: seqs: req: ~p", [RequestSeq]),
    {noreply,
        State#group_state{waiting_list = [{From, RequestSeq} | WaitList]},
        infinity};

handle_call(request_group_info, _From, State) ->
    {reply, {ok, get_group_info(State)}, State}.
%% Start a compaction run. Only matches when no compactor is active
%% (compactor_pid=nil); a concurrent request falls through to the no-op
%% clause below. The compact target is a fresh ".compact" index file; the
%% actual copying is delegated to CompactFun in a linked worker process.
handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil}
        = State) ->
    #group_state{
        group = #spatial_group{name = GroupId, sig = GroupSig} = Group,
        init_args = {RootDir, DbName, _}
    } = State,
    ?LOG_INFO("Spatial index compaction starting for ~s ~s",
        [DbName, GroupId]),
    {ok, Db} = couch_db:open_int(DbName, []),
    {ok, Fd} = open_index_file(compact, RootDir, DbName, GroupSig),
    % reset_file/4 truncates the compact file and returns a group record
    % bound to it (NewGroup#spatial_group.fd is the compact Fd).
    NewGroup = reset_file(Db, Fd, DbName, Group),
    CompactFd = NewGroup#spatial_group.fd,
    % Hand ownership of the compact fd to the worker: unlink it from this
    % server first, then re-link it inside the spawned process so a worker
    % crash takes the fd down with it, not this server.
    unlink(CompactFd),
    Pid = spawn_link(fun() ->
        link(CompactFd),
        CompactFun(Group, NewGroup, DbName),
        unlink(CompactFd)
    end),
    {noreply, State#group_state{compactor_pid = Pid}};
handle_cast({start_compact, _}, State) ->
    %% compact already running, this is a no-op
    {noreply, State};

%% Compaction finished and the compacted index has caught up with (or
%% passed) the live index's sequence: swap the compact file in as the
%% live index file.
handle_cast({compact_done, #spatial_group{current_seq=NewSeq} = NewGroup},
        #group_state{group = #spatial_group{current_seq=OldSeq}} = State)
        when NewSeq >= OldSeq ->
    #group_state{
        group = #spatial_group{name=GroupId, fd=OldFd, sig=GroupSig} = Group,
        init_args = {RootDir, DbName, _},
        updater_pid = UpdaterPid,
        ref_counter = RefCounter
    } = State,

    % Stop any in-flight updater synchronously; it is writing to the old
    % file which is about to be deleted.
    if is_pid(UpdaterPid) ->
        couch_util:shutdown_sync(UpdaterPid);
    true ->
        ok
    end,

    ?LOG_INFO("Spatial index compaction complete for ~s ~s",
        [DbName, GroupId]),
    FileName = index_file_name(RootDir, DbName, GroupSig),
    % Freeze the old fd for readers still holding references, then replace
    % the index file on disk with the compacted one.
    ok = couch_file:only_snapshot_reads(OldFd),
    ok = couch_file:delete(RootDir, FileName),
    ok = couch_file:rename(NewGroup#spatial_group.fd, FileName),

    %% if an updater is running, kill it and start a new one
    NewUpdaterPid =
    if is_pid(UpdaterPid) ->
        Owner = self(),
        spawn_link(fun()-> couch_spatial_updater:update(Owner, NewGroup) end);
    true ->
        nil
    end,

    %% cleanup old group
    unlink(OldFd),
    % Drop our reference to the old fd's counter and start a fresh counter
    % on the new (renamed) fd.
    couch_ref_counter:drop(RefCounter),
    {ok, NewRefCounter} = couch_ref_counter:start([NewGroup#spatial_group.fd]),
    case Group#spatial_group.db of
        nil -> ok;
        Else -> couch_db:close(Else)
    end,

    % Persist the new header soon (handled by handle_info(delayed_commit,...)).
    self() ! delayed_commit,
    {noreply, State#group_state{
        group=NewGroup,
        ref_counter=NewRefCounter,
        compactor_pid=nil,
        updater_pid=NewUpdaterPid
    }};
%% Compaction finished but the live index moved ahead while it ran: run the
%% spatial updater over the compact file to catch it up, then retry the
%% compact_done handshake via a fresh cast.
handle_cast({compact_done, NewGroup}, State) ->
    #group_state{
        group = #spatial_group{name = GroupId, current_seq = CurrentSeq},
        init_args={_RootDir, DbName, _}
    } = State,
    ?LOG_INFO("Spatial index compaction still behind for ~s ~s -- " ++
        "current: ~p compact: ~p",
        [DbName, GroupId, CurrentSeq, NewGroup#spatial_group.current_seq]),
    couch_db:close(NewGroup#spatial_group.db),
    {ok, Db} = couch_db:open_int(DbName, []),
    Pid = spawn_link(fun() ->
        {_,Ref} = erlang:spawn_monitor(fun() ->
            couch_spatial_updater:update(nil, NewGroup#spatial_group{db = Db})
        end),
        % NOTE(review): this receive only matches a {new_group, _} exit
        % reason. If the catch-up updater crashes for any other reason the
        % wrapper process hangs here forever and compactor_pid stays set,
        % blocking future compactions — TODO confirm and consider a
        % catch-all 'DOWN' clause that exits with the reason.
        receive
        {'DOWN', Ref, _, _, {new_group, NewGroup2}} ->
            % GroupId is already bound: this match asserts the updated
            % group is for the same design doc.
            #spatial_group{name=GroupId} = NewGroup2,
            Pid2 = couch_spatial:get_group_server(DbName, GroupId),
            gen_server:cast(Pid2, {compact_done, NewGroup2})
        end
    end),
    {noreply, State#group_state{compactor_pid = Pid}};


%% Checkpoint message from the active updater (matched by Pid so stale
%% updaters are ignored): adopt its partial group and schedule a header
%% commit if one is not already pending.
handle_cast({partial_update, Pid, NewGroup}, #group_state{updater_pid=Pid}
        = State) ->
    #group_state{
        db_name = DbName,
        waiting_commit = WaitingCommit
    } = State,
    NewSeq = NewGroup#spatial_group.current_seq,
    ?LOG_INFO("checkpointing spatial update at seq ~p for ~s ~s", [NewSeq,
        DbName, NewGroup#spatial_group.name]),
    if not WaitingCommit ->
        erlang:send_after(1000, self(), delayed_commit);
    true -> ok
    end,
    {noreply, State#group_state{group=NewGroup, waiting_commit=true}};
%% Drain any other cast silently.
handle_cast(_Msg, State) ->
    {noreply, State}.

%% Write the index header to disk, but only once the database itself has
%% committed at least as far as the index; otherwise retry in a second.
handle_info(delayed_commit, #group_state{db_name=DbName,group=Group}=State) ->
    {ok, Db} = couch_db:open_int(DbName, []),
    CommittedSeq = couch_db:get_committed_update_seq(Db),
    couch_db:close(Db),
    if CommittedSeq >= Group#spatial_group.current_seq ->
        % save the header
        Header = {Group#spatial_group.sig, get_index_header_data(Group)},
        ok = couch_file:write_header(Group#spatial_group.fd, Header),
        ok = couch_file:flush(Group#spatial_group.fd),
        {noreply, State#group_state{waiting_commit=false}};
    true ->
        % We can't commit the header because the database seq that's fully
        % committed to disk is still behind us. If we committed now and the
        % database lost those changes our view could be forever out of sync
        % with the database. But a crash before we commit these changes, no big
        % deal, we only lose incremental changes since last committal.
        erlang:send_after(1000, self(), delayed_commit),
        {noreply, State#group_state{waiting_commit=true}}
    end;

%% The updater finished and exited with the updated group. Reply to every
%% waiter whose requested seq is now covered; if some waiters need a newer
%% seq, reopen the db and run the updater again.
handle_info({'EXIT', FromPid, {new_group, #spatial_group{db=Db}=Group}},
        #group_state{db_name=DbName,
            updater_pid=UpPid,
            ref_counter=RefCounter,
            waiting_list=WaitList,
            waiting_commit=WaitingCommit}=State) when UpPid == FromPid ->
    ok = couch_db:close(Db),
    if not WaitingCommit ->
        erlang:send_after(1000, self(), delayed_commit);
    true -> ok
    end,
    case reply_with_group(Group, WaitList, [], RefCounter) of
    [] ->
        % no outstanding waiters; detach the db handle from the group
        {noreply, State#group_state{waiting_commit=true, waiting_list=[],
            group=Group#spatial_group{db=nil}, updater_pid=nil}};
    StillWaiting ->
        % we still have some waiters, reopen the database and reupdate the index
        {ok, Db2} = couch_db:open_int(DbName, []),
        Group2 = Group#spatial_group{db=Db2},
        Owner = self(),
        % BUGFIX: was couch_view_updater:update/2 (the map/reduce view
        % updater); a spatial group must be updated by the spatial updater,
        % as done everywhere else in this module.
        Pid = spawn_link(fun() ->
            couch_spatial_updater:update(Owner, Group2)
        end),
        {noreply, State#group_state{waiting_commit=true,
            waiting_list=StillWaiting, group=Group2, updater_pid=Pid}}
    end;

%% Normal exits from linked workers (updater/compactor) are expected.
handle_info({'EXIT', _FromPid, normal}, State) ->
    {noreply, State};

%% A linked process died from an uncaught throw(); unwrap the nocatch so
%% the stop reason is the thrown term itself.
handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) ->
    ?LOG_DEBUG("Uncaught throw() in linked pid: ~p", [{FromPid, Reason}]),
    {stop, Reason, State};

%% Any other abnormal exit from a linked process stops this server.
handle_info({'EXIT', FromPid, Reason}, State) ->
    ?LOG_DEBUG("Exit from linked pid: ~p", [{FromPid, Reason}]),
    {stop, Reason, State};

% Shutting down will trigger couch_spatial:handle_info(EXIT...)
handle_info({'DOWN',_,_,_,_}, State) ->
    ?LOG_INFO("Shutting down spatial group server, monitored db is closing.", []),
    {stop, normal, reply_all(State, shutdown)};

handle_info(_Msg, Server) ->
    {noreply, Server}.

terminate(_Reason, _Srv) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

% reply_with_group/3
% for each item in the WaitingList {Pid, Seq}
% if the Seq is =< GroupSeq, reply
reply_with_group(Group=#spatial_group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList],
        StillWaiting, RefCounter) when Seq =< GroupSeq ->
    gen_server:reply(Pid, {ok, Group, RefCounter}),
    reply_with_group(Group, WaitList, StillWaiting, RefCounter);

% else
% put it in the continuing waiting list
reply_with_group(Group, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) ->
    reply_with_group(Group, WaitList, [{Pid, Seq}|StillWaiting], RefCounter);

% return the still waiting list
reply_with_group(_Group, [], StillWaiting, _RefCounter) ->
    StillWaiting.

%% Reply to every queued waiter with Reply (used on shutdown) and clear
%% the waiting list. The catch guards against already-gone callers.
reply_all(#group_state{waiting_list=WaitList}=State, Reply) ->
    [catch gen_server:reply(Pid, Reply) || {Pid, _} <- WaitList],
    State#group_state{waiting_list=[]}.
340 | 341 | open_db_group(DbName, GroupId) -> 342 | case couch_db:open_int(DbName, []) of 343 | {ok, Db} -> 344 | case couch_db:open_doc(Db, GroupId, [ejson_body]) of 345 | {ok, Doc} -> 346 | couch_db:close(Db), 347 | {ok, design_doc_to_spatial_group(Doc)}; 348 | Else -> 349 | couch_db:close(Db), 350 | Else 351 | end; 352 | Else -> 353 | Else 354 | end. 355 | 356 | 357 | design_doc_to_spatial_group(Doc) -> 358 | #doc{id=Id, body={Fields}} = couch_doc:with_ejson_body(Doc), 359 | Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>), 360 | {DesignOptions} = couch_util:get_value(<<"options">>, Fields, {[]}), 361 | {RawIndexes} = couch_util:get_value(<<"spatial">>, Fields, {[]}), 362 | % RawViews is only needed to get the "lib" property 363 | {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}), 364 | Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}), 365 | 366 | % add the views to a dictionary object, with the map source as the key 367 | DictBySrc = 368 | lists:foldl(fun({Name, IndexSrc}, DictBySrcAcc) -> 369 | Index = 370 | case dict:find({IndexSrc}, DictBySrcAcc) of 371 | {ok, Index0} -> Index0; 372 | error -> #spatial{def=IndexSrc} % create new spatial index object 373 | end, 374 | Index2 = Index#spatial{index_names=[Name|Index#spatial.index_names]}, 375 | dict:store({IndexSrc}, Index2, DictBySrcAcc) 376 | end, dict:new(), RawIndexes), 377 | % number the views 378 | {Indexes, _N} = lists:mapfoldl( 379 | fun({_Src, Index}, N) -> 380 | {Index#spatial{id_num=N},N+1} 381 | end, 0, lists:sort(dict:to_list(DictBySrc))), 382 | set_index_sig(#spatial_group{name=Id, lib=Lib, indexes=Indexes, 383 | def_lang=Language, design_options=DesignOptions}). 
384 | 385 | set_index_sig(#spatial_group{ 386 | indexes=Indexes, 387 | lib={[]}, 388 | def_lang=Language, 389 | design_options=DesignOptions}=G) -> 390 | IndexInfo = [I#spatial{update_seq=0, purge_seq=0} || I <- Indexes], 391 | G#spatial_group{sig=couch_util:md5(term_to_binary( 392 | {IndexInfo, Language, DesignOptions, ?LATEST_SPATIAL_DISK_VERSION}))}; 393 | set_index_sig(#spatial_group{ 394 | indexes=Indexes, 395 | lib=Lib, 396 | def_lang=Language, 397 | design_options=DesignOptions}=G) -> 398 | IndexInfo = [I#spatial{update_seq=0, purge_seq=0} || I <- Indexes], 399 | G#spatial_group{sig=couch_util:md5(term_to_binary( 400 | {IndexInfo, Language, DesignOptions, ?LATEST_SPATIAL_DISK_VERSION, 401 | geocouch_duplicates:sort_lib(Lib)}))}. 402 | 403 | 404 | prepare_group({RootDir, DbName, #spatial_group{sig=Sig}=Group}, ForceReset)-> 405 | case couch_db:open_int(DbName, []) of 406 | {ok, Db} -> 407 | case open_index_file(RootDir, DbName, Sig) of 408 | {ok, Fd} -> 409 | if ForceReset -> 410 | % this can happen if we missed a purge 411 | {ok, reset_file(Db, Fd, DbName, Group)}; 412 | true -> 413 | case (catch couch_file:read_header(Fd)) of 414 | {ok, {Sig, HeaderInfo}} -> 415 | % sigs match! 416 | {ok, init_group(Db, Fd, Group, HeaderInfo)}; 417 | _ -> 418 | % this happens on a new file 419 | {ok, reset_file(Db, Fd, DbName, Group)} 420 | end 421 | end; 422 | Error -> 423 | catch delete_index_file(RootDir, DbName, Sig), 424 | Error 425 | end; 426 | Else -> 427 | Else 428 | end. 
429 | 430 | get_index_header_data(#spatial_group{current_seq=Seq, purge_seq=PurgeSeq, 431 | id_btree=IdBtree,indexes=Indexes}) -> 432 | % Fill out an empty record with the information we need to persist on disk 433 | IndexStates = [ 434 | #spatial{ 435 | treepos=I#spatial.treepos, 436 | treeheight=I#spatial.treeheight, 437 | update_seq=I#spatial.update_seq, 438 | purge_seq=I#spatial.purge_seq} || 439 | I <- Indexes 440 | ], 441 | #spatial_index_header{ 442 | seq=Seq, 443 | purge_seq=PurgeSeq, 444 | id_btree_state=couch_btree:get_state(IdBtree), 445 | index_states=IndexStates 446 | }. 447 | 448 | delete_index_file(RootDir, DbName, GroupSig) -> 449 | file:delete(index_file_name(RootDir, DbName, GroupSig)). 450 | 451 | index_file_name(RootDir, DbName, GroupSig) -> 452 | couch_view_group:design_root(RootDir, DbName) ++ 453 | couch_util:to_hex(?b2l(GroupSig)) ++".spatial". 454 | 455 | index_file_name(compact, RootDir, DbName, GroupSig) -> 456 | couch_view_group:design_root(RootDir, DbName) ++ 457 | couch_util:to_hex(?b2l(GroupSig)) ++".compact.spatial". 458 | 459 | 460 | open_index_file(RootDir, DbName, GroupSig) -> 461 | FileName = index_file_name(RootDir, DbName, GroupSig), 462 | case couch_file:open(FileName) of 463 | {ok, Fd} -> {ok, Fd}; 464 | {error, enoent} -> couch_file:open(FileName, [create]); 465 | Error -> Error 466 | end. 467 | 468 | open_index_file(compact, RootDir, DbName, GroupSig) -> 469 | FileName = index_file_name(compact, RootDir, DbName, GroupSig), 470 | case couch_file:open(FileName) of 471 | {ok, Fd} -> {ok, Fd}; 472 | {error, enoent} -> couch_file:open(FileName, [create]); 473 | Error -> Error 474 | end. 
475 | 476 | get_group_info(State) -> 477 | #group_state{ 478 | group=Group, 479 | updater_pid=UpdaterPid, 480 | compactor_pid=CompactorPid, 481 | waiting_commit=WaitingCommit, 482 | waiting_list=WaitersList 483 | } = State, 484 | #spatial_group{ 485 | fd = Fd, 486 | sig = GroupSig, 487 | def_lang = Lang, 488 | current_seq=CurrentSeq, 489 | purge_seq=PurgeSeq 490 | } = Group, 491 | {ok, Size} = couch_file:bytes(Fd), 492 | [ 493 | {signature, ?l2b(couch_util:to_hex(?b2l(GroupSig)))}, 494 | {language, Lang}, 495 | {disk_size, Size}, 496 | {updater_running, UpdaterPid /= nil}, 497 | {compact_running, CompactorPid /= nil}, 498 | {waiting_commit, WaitingCommit}, 499 | {waiting_clients, length(WaitersList)}, 500 | {update_seq, CurrentSeq}, 501 | {purge_seq, PurgeSeq} 502 | ]. 503 | 504 | reset_group(#spatial_group{indexes=Indexes}=Group) -> 505 | Indexes2 = [Index#spatial{treepos=nil,treeheight=0} || Index <- Indexes], 506 | Group#spatial_group{db=nil,fd=nil,current_seq=0,indexes=Indexes2}. 507 | 508 | reset_file(Db, Fd, DbName, #spatial_group{sig=Sig,name=Name} = Group) -> 509 | ?LOG_DEBUG("Resetting spatial group index \"~s\" in db ~s", [Name, DbName]), 510 | ok = couch_file:truncate(Fd, 0), 511 | ok = couch_file:write_header(Fd, {Sig, nil}), 512 | ok = couch_file:flush(Fd), 513 | init_group(Db, Fd, reset_group(Group), nil). 
514 | 515 | 516 | init_group(Db, Fd, #spatial_group{indexes=Indexes}=Group, nil) -> 517 | init_group(Db, Fd, Group, 518 | #spatial_index_header{seq=0, purge_seq=couch_db:get_purge_seq(Db), 519 | id_btree_state=nil, index_states=[#spatial{} || _ <- Indexes]}); 520 | init_group(Db, Fd, #spatial_group{indexes=Indexes}=Group, IndexHeader) -> 521 | #spatial_index_header{ 522 | seq=Seq, 523 | purge_seq=PurgeSeq, 524 | id_btree_state=IdBtreeState, 525 | index_states=IndexStates 526 | } = IndexHeader, 527 | {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd), 528 | Indexes2 = lists:zipwith( 529 | fun(State, Index) -> 530 | Index#spatial{ 531 | treepos=State#spatial.treepos, 532 | treeheight=State#spatial.treeheight, 533 | update_seq=State#spatial.update_seq, 534 | purge_seq=State#spatial.purge_seq, 535 | fd=Fd} 536 | end, 537 | IndexStates, Indexes), 538 | Group#spatial_group{db=Db, fd=Fd, current_seq=Seq, purge_seq=PurgeSeq, 539 | id_btree=IdBtree, indexes=Indexes2}. 540 | --------------------------------------------------------------------------------